| repo_name (string, 7-90 chars) | path (string, 5-191 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 976-581k chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
facaiy/spark
|
python/setup.py
|
4
|
10245
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while we are under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.8.1'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Clean up either the symlink farm or the copied tree, depending on which was created
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
|
apache-2.0
|
dennisss/tansa
|
scripts/plot.py
|
1
|
1210
|
#!/usr/bin/env python
# Used to nicely plot data from the `log` folder
import numpy as np
import matplotlib.pyplot as plt
import math
FILE = 'log/20170408-14_52_11.csv'
ROLE = 0
data = np.genfromtxt(FILE, delimiter=',', dtype=float)
#data = data[-10000:,:]
print(data.shape)
fig, axarr = plt.subplots(2, 1)
plot1 = axarr[0]
plot2 = axarr[1]
#plt.subplot(211)
off = 6*ROLE
# Target
plot1.plot(data[:, (off+1)], data[:, (off+2)], 'r--')
# Actual
plot1.plot(data[:, (off+4)], data[:, (off+5)], 'b')
#plt.plot(x, magX, 'b')
#plt.plot(x, p(current), 'r--')
plot1.set_ylim(-2, 2)
plot1.set_xlim(-3, 3)
plot1.legend(['Target Position', 'Actual Position'], prop={'size':9})
plot1.set_title('Position (meters)')
plot1.set_aspect('equal', adjustable='box')
#plt.subplot(212)
#plt.show()
e = data[:,(off+1):(off+4)] - data[:, (off+4):(off+7)]
plot2.plot(data[:, 0], e[:, 0], 'r')
plot2.plot(data[:, 0], e[:, 1], 'g')
plot2.plot(data[:, 0], e[:, 2], 'b')
plot2.set_ylim(-0.5, 0.5)
plot2.set_title('Axis Errors (meters)')
plot2.plot([0, data[-1, 0]], [0,0], '--', color='0.75')
plot2.legend(['X', 'Y', 'Z', 'Zero Line'], prop={'size':9})
fig.tight_layout()
plt.show()
#plt.plot(data[:,1:3],)
|
mit
|
COHRINT/cops_and_robots
|
src/cops_and_robots/human_tools/nlp/templater.py
|
1
|
13920
|
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
from collections import OrderedDict
from scipy.stats import dirichlet
import numpy as np
from cops_and_robots.human_tools.statement_template import StatementTemplate
class TDC(object):
"""Target description clause.
A target description clause provides a formal structure for semantically
tagged natural language.
Parameters
----------
tagged_phrase : sequence of (token, tag) pairs, optional
The semantically tagged input phrase associated with the TDC.
show_children : bool, optional
Whether child TDCs are reported alongside this TDC.
Attributes
----------
templates : dict, class attr
The list of tags that define a type of TDC, e.g. 'action' and
'spatial relation' have different sets of tags.
type : str
The template type.
"""
def __init__(self, tagged_phrase='', show_children=False):
self.tagged_phrase = tagged_phrase
self.phrase = " ".join([s[0] for s in tagged_phrase])
self.parsing = {}
self.show_children = show_children
self.child_TDCs = []
# List possible templates
self.templates = []
for template_type, templates in StatementTemplate.template_trees.iteritems():
for j, template_keys in enumerate(templates):
empty_template = OrderedDict(zip(template_keys,
[''] * len(template_keys)))
t = (template_type + str(j), empty_template)
self.templates.append(t)
self.template_counts = [1] * len(self.templates)
# Generate likelihoods of each template based on the input phrase
self.fit_phrase()
self.fill_in_defaults()
self.prune()
def __repr__(self):
expected_template = self.get_expected_templates()[0]
str_ = "{} TDC: {}".format(expected_template[0].title(), expected_template[1])
# if self.show_children:
# if len(self.child_TDCs) > 0:
# str_ += " with children: "
# for child_TDC in self.child_TDCs:
# str_ += child_TDC.__repr__()
return str_
def fit_phrase(self):
"""Fit the tagged_phrase into possible templates
This also generates counts of observed labels for each template,
where the counts are normalized by the template lengths. Each template
is modeled as its own dirichlet distribution with dimension equal to
the number of labels in the template. The expected values of these
dirichlets can be compared to identify the most likely template(s).
"""
for i, template in enumerate(self.templates):
for tagged_token in self.tagged_phrase:
token = tagged_token[0]
tag = tagged_token[1]
for template_key in template[1].keys():
#<>TODO: Fix CRF labels instead of this modification
key = template_key
if 'spatial_relation' in template_key:
key = template_key.replace('_', '')
# Fill template and add count (normalized by template length)
if tag.upper() in key.upper():
if template[1][template_key] != '':
self.add_to_child_TDC(tag, token)
else:
template[1][template_key] = token
self.template_counts[i] += 1 / len(template[1])
#<>TODO: validate normalization over temp. length
self.template_expected_probs = dirichlet.mean(self.template_counts)
def add_to_child_TDC(self, tag, token):
# If no child TDCs exist, make new child TDC
# If child TDCs exist, add tag/token pair to first empty one
# Add in default values
# Take parent values
pass
def fill_in_defaults(self):
for template in self.templates:
for template_key in template[1].keys():
if template[1][template_key] == '':
try:
def_ = StatementTemplate.default_components[template_key]
template[1][template_key] = def_
except KeyError:
logging.debug("no {} default".format(template_key))
def prune(self):
for template in self.templates:
for template_key in template[1].keys():
if template_key == '':
del template[1]['']
def get_expected_templates(self):
# Find the most likely template(s) as the suggested TDC
i = np.argmax(self.template_expected_probs)
templates = [self.templates[i]]
for child in self.child_TDCs:
templates.append(child.get_expected_templates())
return templates
# def _check_input(self, input_span, true_span_tag, TDC_type=''):
# # Check for template matching
# is_conflicting = ((true_span_tag not in self.template)
# and len(self.template) > 0)
# # Check for duplicate spans
# current_span = self.get_spans_from_tags([true_span_tag])[0][0]
# has_duplicate = len(current_span) > 0
# # Add to/new child TDC if necessary
# if is_conflicting or has_duplicate:
# if len(self.child_TDCs) > 0:
# self.child_TDCs[0].add_span([input_span, true_span_tag])
# else:
# new_tdc = TDC(type_=TDC_type, phrase=self.phrase)
# new_tdc.add_span([input_span, true_span_tag])
# logging.debug("Adding a child TDC to this {} --> {}"
# .format(self, new_tdc))
# self.child_TDCs.append(new_tdc)
# return False
# # Set template and type
# if len(TDC_type) > 0:
# self.type = TDC_type
# if len(self.type) > 0:
# self.template = TDC.templates[self.type]
# return True
# def add_span(self, tagged_span=[], span='', span_tag=''):
# if len(tagged_span) > 0:
# span, span_tag = tagged_span
# if span_tag == 'NULL':
# logging.debug("Skipping null spans.")
# return
# if len(tagged_span) == 1 or len(tagged_span) > 2:
# logging.warning("You must specify a word and its semantic tag!")
# return
# if span_tag == 'TARGET':
# self.target = span
# elif span_tag == 'POSITIVITY':
# self.positivity = span
# elif span_tag == 'GROUNDING':
# self.grounding = span
# elif span_tag == 'SPATIALRELATION':
# self.spatial_relation = span
# elif span_tag == 'ACTION':
# self.action = span
# elif span_tag == 'MODIFIER':
# self.modifier = span
# def get_spans_from_tags(self, span_tag, include_empty_spans=True):
# if type(span_tag) is not type(list()):
# span_tag = [span_tag]
# parsed_spans = []
# for span_tag in span_tag:
# if span_tag == 'TARGET':
# span = self.target
# elif span_tag == 'POSITIVITY':
# span = self.positivity
# elif span_tag == 'GROUNDING':
# span = self.grounding
# elif span_tag == 'SPATIALRELATION':
# span = self.spatial_relation
# elif span_tag == 'ACTION':
# span = self.action
# elif span_tag == 'MODIFIER':
# span = self.modifier
# if not include_empty_spans and span == '':
# continue
# parsed_spans.append([span, span_tag])
# return parsed_spans
# def to_tagged_phrase(self):
# return [[word, tag] for tag, word in self.parsing.iteritems()]
class TDC_Collection(object):
"""A collection of TDCs
"""
def __init__(self, tagged_document):
self.TDCs = []
self.parse_tagged_document(tagged_document)
self.tag_order = {'TARGET': 0,
'POSITIVITY': 1,
'SPATIALRELATION': 2,
'ACTION': 3,
'MODIFIER': 4,
'GROUNDING': 5,
}
def parse_tagged_document(self, tagged_document):
"""Generate a complete parsing of a tagged document.
Parameters
----------
tagged_document : array_like
A 2-by-n array of tokens and tags.
"""
tagged_phrases = self.split_phrases(tagged_document)
self.get_TDCs_from_phrases(tagged_phrases)
self.flatten_TDCs() #<>TODO
self.prune_TDCs() #<>TODO
def split_phrases(self, tagged_document):
"""Identify individual phrases in a tagged document"""
phrases = []
phrase = []
for tagged_phrase in tagged_document:
if tagged_phrase[0] not in ['.', '!', '?']:
phrase.append(tagged_phrase)
else:
phrases.append(phrase)
phrase = []
if len(phrase) > 0:
phrases.append(phrase)
return phrases
def get_TDCs_from_phrases(self, tagged_phrases):
"""Create TDCs for each phrase"""
for tagged_phrase in tagged_phrases:
tdc = TDC(tagged_phrase=tagged_phrase)
self.TDCs.append(tdc)
def flatten_TDCs(self, remove_children=False):
"""Flatten TDC list, optionally removing children"""
return
child_TDCs = []
for tdc in self.TDCs:
for i, child_TDC in enumerate(tdc.child_TDCs):
child_TDCs.append(child_TDC)
if remove_children:
del tdc.child_TDCs[i]
self.TDCs += child_TDCs
def prune_TDCs(self):
"""Prune incomplete TDCs"""
return
for i, tdc in enumerate(self.TDCs):
if tdc.type == '':
del self.TDCs[i]
continue
tags = TDC.templates[tdc.type]
parsed_spans = tdc.get_spans_from_tags(tags, include_empty_spans=False)
empty_spans = [parsed_span[1] for parsed_span in parsed_spans
if len(parsed_span[0]) == 0]
if len(empty_spans) > 0:
del self.TDCs[i]
def print_TDCs(self, print_phrases=True):
for tdc in self.TDCs:
if print_phrases:
logging.info(tdc.phrase)
logging.info(tdc)
logging.info('')
def plot_TDCs(self, filename='TDC Graph', scale=0.9, aspect=3.0):
from matplotlib import rc
import daft
rc("font", size=6)
rc("text", usetex=False)
num_TDCs = len(self.TDCs)
shape = [3 * scale * aspect, scale * num_TDCs * 3.75]
last_y = shape[1]
# Instantiate the PGM
pgm = daft.PGM(shape, origin=[0.2, 0.2], directed=False, aspect=aspect)
for i, TDC in enumerate(self.TDCs):
# Create the TDC node
TDC_i = i + 1
TDC_name = TDC.type + " TDC" + str(TDC_i)
num_tags = len(TDC.parsing)
TDC_x = scale
TDC_y = last_y - scale * num_tags/2
pgm.add_node(daft.Node(TDC_name, TDC_name, TDC_x, TDC_y, scale=scale))
# Create the tags and spans
tag_i = 0
sorted_tags = sorted(TDC.parsing, key=lambda x:self.tag_order[x])
for tag_, tag in enumerate(sorted_tags):
span = TDC.parsing[tag]
tag_name = tag + str(TDC_i)
tag_x = TDC_x + aspect * scale * 0.7
tag_y = last_y - scale - tag_i * 0.7 * scale
pgm.add_node(daft.Node(tag_name, tag, tag_x, tag_y, scale=scale))
span_name = span + str(TDC_i)
span_x = tag_x + aspect * scale * 0.7
span_y = tag_y
pgm.add_node(daft.Node(span_name, span, span_x, span_y, scale=scale))
# Add in the edges.
pgm.add_edge(TDC_name, tag_name)
pgm.add_edge(tag_name, span_name)
tag_i += 1
last_y = tag_y
# Render and save.
pgm.render()
pgm.figure.savefig(filename + ".png", dpi=150)
pgm.figure.clf()
def get_expected_templates(self):
expected_templates = []
for tdc in self.TDCs:
expected_templates.append(tdc.get_expected_templates()[0])
return expected_templates
def test_TDC():
from cops_and_robots.human_tools.nlp.tagger import generate_test_data
tagged_document = generate_test_data()
phrase = tagged_document[9:14] # Nothing is next to the dresser
# phrase = tagged_document[42:47] # A robot's heading away from you
tdc = TDC(phrase)
print phrase
print "expected template", tdc.get_expected_templates()
def test_TDC_collection():
from cops_and_robots.human_tools.nlp.tagger import generate_test_data
tagged_document = generate_test_data()
TDC_collection = TDC_Collection(tagged_document)
# TDC_collection.print_TDCs()
filled_templates = TDC_collection.get_expected_templates()
return filled_templates
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
# test_TDC()
print test_TDC_collection()
|
apache-2.0
|
KristoferHellman/gimli
|
python/pygimli/mplviewer/__init__.py
|
1
|
2935
|
# -*- coding: utf-8 -*-
"""
Viewer interface .. depends on matplotlib
"""
import math
import matplotlib as mpl
holdAxes_ = 0
def updateAxes(ax, a=None):
"""
for internal use
"""
if not holdAxes_:
try:
mpl.pyplot.pause(0.01)
except Exception as e:
#print(e)
pass
from .dataview import *
from .meshview import *
from .colorbar import *
from .overlayimage import *
import matplotlib.pyplot as plt
import numpy as np
def showLater(val=1):
raise RuntimeError('do not use')
import matplotlib.pyplot as plt
if val == 1:
plt.ion()
else:
plt.ioff()
plt.show()
def wait():
plt.pause(0.01)
plt.show()
goldenMean = 1.618 # (1.0 + math.sqrt(5.0)) / 2.0
def setOutputStyle(dim='w', paperMargin=5, xScale=1.0, yScale=1.0,
fontsize=9, scale=1, usetex=True):
"""
"""
if dim == 'w':
dim = 0
else:
dim = 1
a4 = [21.0, 29.7]
inches_per_cm = 1. / 2.54
inches_per_pt = 1.0 / 72.27 # pt/inch (latex)
golden_mean = (1.0 + math.sqrt(5.0)) / 2.0
textwidth = (a4[0] - paperMargin) * inches_per_cm
fig_width = textwidth * xScale # fig width in inches
fig_height = textwidth * yScale # fig height in inches
fig_size = [fig_width * scale, fig_height * scale]
# print "figsize:", fig_size
# fig.set_size_inches(fig_size)
#from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# rc('font',**{'family':'serif','serif':['Palatino']})
params = {'backend': 'ps',
'font.size': fontsize * scale,
#'font.weight' : 'bold',
'axes.labelsize': fontsize * scale,
'font.size': fontsize * scale,
'legend.fontsize': fontsize * scale,
'xtick.labelsize': fontsize * scale,
'ytick.labelsize': fontsize * scale,
# font.sans-serif : Bitstream Vera Sans, Lucida Grande, Verdana, Geneva, Lucid, Arial, Helvetica
#'font.cmb10' : 'cmb10',
#'font.family' : 'cursive',
'font.family': 'sans-serif',
#'font.sans-serif' : 'Helvetica',
'text.usetex': usetex,
'figure.figsize': fig_size,
'xtick.major.pad': 4 * scale,
'xtick.minor.pad': 4 * scale,
'ytick.major.pad': 4 * scale,
'ytick.minor.pad': 4 * scale,
'xtick.major.size': 4 * scale, # major tick size in points
'xtick.minor.size': 2 * scale, # minor tick size in points
'ytick.major.size': 4 * scale, # major tick size in points
'ytick.minor.size': 2 * scale, # minor tick size in points
'lines.markersize': 6 * scale,
'lines.linewidth': 0.6 * scale
}
plt.rcParams.update(params)
# def setOutPutStyle
|
gpl-3.0
|
Hornobster/Ball-Tracking
|
eval_classification_net.py
|
1
|
5280
|
#!/usr/bin/python
import numpy as np
import os
import sys
import h5py
import matplotlib.pyplot as plt
# check console arguments
if len(sys.argv) != 6:
print('Usage: %s descriptor model datasetDir batchSize numSamples' % sys.argv[0])
sys.exit(1)
# get console arguments
classifierDescriptor, classifierModel, datasetDir, batchSize, numSamples = sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])
caffe_root = os.getenv('CAFFE_ROOT', './')
sys.path.insert(0, caffe_root + '/python')
import caffe
BATCH_FILENAME_FORMAT = 'dataset_batch%d.hdf5'
FIRST_LAYER = 'conv1'
def loadBatch(datasetDir, batch_size, n):
data_arr = np.zeros((batch_size, 1, 100, 100))
label_arr = np.zeros((batch_size))
hdf5 = os.path.join(datasetDir, BATCH_FILENAME_FORMAT % n)
f = h5py.File(hdf5, "r")
images = f.keys()
for idx, i in enumerate(images):
if idx < batch_size:
data_arr[idx, 0, ...] = f[i][...]
label_arr[idx] = np.int32(f[i].attrs['HAS_SPHERE'])
data_arr /= 256.0 # transform to [0, 1)
f.close()
return data_arr, label_arr
# setup caffe
caffe.set_device(0)
caffe.set_mode_gpu()
# load model
net = caffe.Net(classifierDescriptor, classifierModel, caffe.TEST)
correct = 0
confusion_matrix = np.zeros((4), dtype='uint32') # [ TP, FN, FP, TN]
# auroc statistics
auroc_thresholds = np.linspace(0, 1, 100)
auroc_stats = np.zeros((len(auroc_thresholds), 4), dtype='uint32') # [ TP, FN, FP, TN ]
threshold_correct = np.zeros(len(auroc_thresholds), dtype='uint32')
# main loop
numBatches = numSamples // batchSize
for i in range(numBatches):
if i % 10 == 0:
print ('Testing batch %d / %d... %f%%' % (i, numBatches, float(i) / numBatches * 100.0))
# load new test batch
d, l = loadBatch(datasetDir, batchSize, i)
net.blobs['data'].data[...] = d
net.blobs['label'].data[...] = l
net.forward(start=FIRST_LAYER)
correct += sum(net.blobs['prob'].data.argmax(1) == net.blobs['label'].data)
predicted = net.blobs['prob'].data.argmax(1)
sphere_prob = net.blobs['prob'].data[:, 1]
for p in range(batchSize):
label = int(net.blobs['label'].data[p])
# update confusion matrix
if label == 1 and predicted[p] == 1: # true positive
confusion_matrix[0] += 1
elif label == 1 and predicted[p] == 0: # false negative
confusion_matrix[1] += 1
elif label == 0 and predicted[p] == 1: # false positive
confusion_matrix[2] += 1
elif label == 0 and predicted[p] == 0: # true negative
confusion_matrix[3] += 1
# update auroc stats
for idx, threshold in enumerate(auroc_thresholds):
if label == 1 and (sphere_prob[p] > threshold): # true positive
auroc_stats[idx][0] += 1
threshold_correct[idx] += 1
elif label == 1 and (sphere_prob[p] < threshold): # false negative
auroc_stats[idx][1] += 1
elif label == 0 and (sphere_prob[p] > threshold): # false positive
auroc_stats[idx][2] += 1
elif label == 0 and (sphere_prob[p] < threshold): # true negative
auroc_stats[idx][3] += 1
threshold_correct[idx] += 1
# compute Youden's index (J = TPR - FPR); the best threshold maximizes J
thresholds_tpr = ((auroc_stats[:, 0]).astype(float) / (auroc_stats[:, 0] + auroc_stats[:, 1]))
thresholds_fpr = ((auroc_stats[:, 2]).astype(float) / (auroc_stats[:, 2] + auroc_stats[:, 3]))
youden = thresholds_tpr - thresholds_fpr
max_youden_idx = youden.argmax()
print ('With Threshold 50%')
print ('Correct: %d / %d Accuracy: %f' % (correct, numSamples, float(correct) / numSamples * 100.0))
print ('True Positives: %d False Negatives: %d False Positives: %d True Negatives: %d' % (confusion_matrix[0], confusion_matrix[1], confusion_matrix[2], confusion_matrix[3]))
with np.errstate(divide='ignore', invalid='ignore'):
print ('TPR (Recall): %f FPR (Fall-Out): %f' % (float(confusion_matrix[0]) / (confusion_matrix[0] + confusion_matrix[1]), float(confusion_matrix[2]) / (confusion_matrix[2] + confusion_matrix[3])))
print ()
print ('With Threshold %f' % auroc_thresholds[max_youden_idx])
print ('Correct: %d / %d Accuracy: %f' % (threshold_correct[max_youden_idx], numSamples, float(threshold_correct[max_youden_idx]) / numSamples * 100.0))
print ('True Positives: %d False Negatives: %d False Positives: %d True Negatives: %d' % (auroc_stats[max_youden_idx][0], auroc_stats[max_youden_idx][1], auroc_stats[max_youden_idx][2], auroc_stats[max_youden_idx][3]))
with np.errstate(divide='ignore', invalid='ignore'):
print ('TPR (Recall): %f FPR (Fall-Out): %f' % (thresholds_tpr[max_youden_idx], thresholds_fpr[max_youden_idx]))
# plot auroc
z = np.linspace(min(thresholds_fpr), max(thresholds_fpr))
plt.plot(z, z, '--')
plt.plot(thresholds_fpr, thresholds_tpr, 'r')
plt.plot([thresholds_fpr[max_youden_idx], thresholds_fpr[max_youden_idx]], [thresholds_tpr[max_youden_idx], thresholds_fpr[max_youden_idx]], ':', lw=2)
plt.fill_between(thresholds_fpr, thresholds_tpr, 0, color='blue', alpha=0.3)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Best Threshold: %f' % auroc_thresholds[max_youden_idx])
plt.show()
|
mit
|
chrisjmccormick/simsearch
|
runDBSCAN.py
|
1
|
5841
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 01 11:19:04 2016
@author: Chris
"""
from simsearch import SimSearch
from sklearn.cluster import DBSCAN
import sklearn
import time
from sklearn.neighbors import NearestNeighbors
def findEps(ssearch):
"""
Estimate a good DBSCAN epsilon value by histogramming each point's nearest-neighbor (cosine) distance.
"""
###########################################################################
# Calculate nearest neighbors
###########################################################################
# Create a nearest neighbors model--we need 2 nearest neighbors since the
# nearest neighbor to a point is going to be itself.
nbrs_model = NearestNeighbors(n_neighbors=2, algorithm='brute', metric='cosine').fit(ssearch.index.index)
t0 = time.time()
# Find nearest neighbors.
distances, indices = nbrs_model.kneighbors(ssearch.index.index)
elapsed = time.time() - t0
print('Took %.2f seconds' % elapsed)
distances = [d[1] for d in distances]
indeces = [ind[1] for ind in indices]
###########################################################################
# Histogram the nearest neighbor distances.
###########################################################################
import matplotlib.pyplot as plt
counts, bins, patches = plt.hist(distances, bins=16)
plt.title("Nearest neighbor distances")
plt.xlabel("Distance")
plt.ylabel("Frequency")
print('\n%d bins:' % len(counts))
countAcc = 0
num_points = len(ssearch.index.index)
for i in range(0, len(counts)):
countAcc += counts[i]
# Calculate the percentage of values which fall below the upper limit
# of this bin.
prcnt = float(countAcc) / float(num_points) * 100.0
print(' %.2f%% < %.2f' % (prcnt, bins[i + 1]))
def findMinPts(ssearch, eps):
"""
Estimate a good MinPts value by counting, for each document, how many neighbors lie within eps, and histogramming those counts.
"""
###########################################################################
# Count neighbors within threshold
###########################################################################
print('Calculating pair-wise distances...')
# Calculate pair-wise cosine distance for all documents.
t0 = time.time()
DD = sklearn.metrics.pairwise.cosine_distances(ssearch.index.index)
elapsed = time.time() - t0
print(' Took %.2f seconds' % elapsed)
print('Counting number of neighbors...')
t0 = time.time()
# Create a list to hold the number of neighbors for each point.
numNeighbors = [0]*len(DD)
for i in range(0, len(DD)):
dists = DD[i]
count = 0
for j in range(0, len(DD)):
if (dists[j] < eps):
count += 1
numNeighbors[i] = count
elapsed = time.time() - t0
print(' Took %.2f seconds' % elapsed)
###############################################################################
# Histogram the nearest neighbor distances.
###############################################################################
import matplotlib.pyplot as plt
counts, bins, patches = plt.hist(numNeighbors, bins=60)
plt.title("Number of neighbors")
plt.xlabel("Number of neighbors")
plt.ylabel("Frequency")
print('\n%d bins:' % (len(bins) - 1))
binsStr = ''
for b in bins:
binsStr += ' %0.2f' % b
print(binsStr)
def runClustering(ssearch, eps, min_samples):
"""
Run DBSCAN with the determined eps and MinPts values.
"""
print('Clustering all documents with DBSCAN, eps=%0.2f min_samples=%d' % (eps, min_samples))
# Initialize DBSCAN with parameters.
# I forgot to use cosine at first!
db = DBSCAN(eps=eps, min_samples=min_samples, metric='cosine', algorithm='brute')
# Time this step.
t0 = time.time()
# Cluster the LSI vectors.
db.fit(ssearch.index.index)
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print(" done in %.3fsec" % elapsed)
# Get the set of unique IDs.
cluster_ids = set(db.labels_)
# Show the number of clusters (don't include noise label)
print('Number of clusters (excluding "noise"): %d' % (len(cluster_ids) - 1))
# For each of the clusters...
for cluster_id in cluster_ids:
# Get the list of all doc IDs belonging to this cluster.
cluster_doc_ids = []
for doc_id in range(0, len(db.labels_)):
if db.labels_[doc_id] == cluster_id:
cluster_doc_ids.append(doc_id)
# Get the top words in this cluster
top_words = ssearch.getTopWordsInCluster(cluster_doc_ids)
print(' Cluster %d: (%d docs) %s' % (cluster_id, len(cluster_doc_ids), " ".join(top_words)))
def main():
"""
Entry point for the script.
"""
###########################################################################
# Load the corpus
###########################################################################
# Load the pre-built corpus.
print('Loading the saved SimSearch and corpus...')
(ksearch, ssearch) = SimSearch.load(save_dir='./mhc_corpus/')
print(' %d documents.' % len(ssearch.index.index))
# Step 1: Run a technique to find a good 'eps' value.
#findEps(ssearch)
#eps = 0.5
eps = 0.44
# Step 2: Run a technique to find a good 'MinPts' value.
# TODO - This took ~17 min. on my desktop!
#findMinPts(ssearch, eps)
#min_samples = 8
min_samples = 4
# Step 3: Run DBSCAN
runClustering(ssearch, eps, min_samples)
main()
|
mit
|
datapythonista/pandas
|
pandas/tests/indexes/base_class/test_formats.py
|
6
|
5155
|
import numpy as np
import pytest
import pandas._config.config as cf
from pandas import Index
class TestIndexRendering:
@pytest.mark.parametrize(
"index,expected",
[
# ASCII
# short
(
Index(["a", "bb", "ccc"]),
"""Index(['a', 'bb', 'ccc'], dtype='object')""",
),
# multiple lines
(
Index(["a", "bb", "ccc"] * 10),
"Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
"'bb', 'ccc', 'a', 'bb', 'ccc',\n"
" 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
"'bb', 'ccc', 'a', 'bb', 'ccc',\n"
" 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
" dtype='object')",
),
# truncated
(
Index(["a", "bb", "ccc"] * 100),
"Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n"
" ...\n"
" 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
" dtype='object', length=300)",
),
# Non-ASCII
# short
(
Index(["γ", "γγ", "γγγ"]),
"""Index(['γ', 'γγ', 'γγγ'], dtype='object')""",
),
# multiple lines
(
Index(["γ", "γγ", "γγγ"] * 10),
(
"Index(['γ', 'γγ', 'γγγ', 'γ', 'γγ', 'γγγ', "
"'γ', 'γγ', 'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ', 'γγ', 'γγγ', 'γ', 'γγ', 'γγγ', "
"'γ', 'γγ', 'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ'],\n"
" dtype='object')"
),
),
# truncated
(
Index(["γ", "γγ", "γγγ"] * 100),
(
"Index(['γ', 'γγ', 'γγγ', 'γ', 'γγ', 'γγγ', "
"'γ', 'γγ', 'γγγ', 'γ',\n"
" ...\n"
" 'γγγ', 'γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ', 'γ', 'γγ', 'γγγ'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.parametrize(
"index,expected",
[
# short
(
Index(["γ", "γγ", "γγγ"]),
("Index(['γ', 'γγ', 'γγγ'], dtype='object')"),
),
# multiple lines
(
Index(["γ", "γγ", "γγγ"] * 10),
(
"Index(['γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ', 'γγ', 'γγγ'],\n"
" dtype='object')"
""
),
),
# truncated
(
Index(["γ", "γγ", "γγγ"] * 100),
(
"Index(['γ', 'γγ', 'γγγ', 'γ', 'γγ', "
"'γγγ', 'γ', 'γγ', 'γγγ',\n"
" 'γ',\n"
" ...\n"
" 'γγγ', 'γ', 'γγ', 'γγγ', 'γ', "
"'γγ', 'γγγ', 'γ', 'γγ',\n"
" 'γγγ'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context("display.unicode.east_asian_width", True):
result = repr(index)
assert result == expected
def test_repr_summary(self):
with cf.option_context("display.max_seq_items", 10):
result = repr(Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
def test_index_repr_bool_nan(self):
# GH32146
arr = Index([True, False, np.nan], dtype=object)
exp1 = arr.format()
out1 = ["True", "False", "NaN"]
assert out1 == exp1
exp2 = repr(arr)
out2 = "Index([True, False, nan], dtype='object')"
assert out2 == exp2
|
bsd-3-clause
|
hbp-unibi/SNABSuite
|
plot/2dim_plot.py
|
1
|
5552
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SNABSuite -- Spiking Neural Architecture Benchmark Suite
# Copyright (C) 2017 Andreas Stöckel, Christoph Jenzen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
Plots data for two dimensional sweeps
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import argparse
parser = argparse.ArgumentParser(description='Plot two-dimensional images')
# Optional arguments
parser.add_argument("--zmin", type=float, help="minimal z-value")
parser.add_argument("--zmax", type=float, help="maximal z-value")
parser.add_argument(
"-nl", type=int, help="Number of levels/ticks in z", default=11)
parser.add_argument("-q", help="qualitative Colormap", action="store_true")
parser.add_argument("-c", help="draw contour lines", action="store_true")
# Required Parameters
parser.add_argument("-z", type=int, required=True, help="Column of z-values")
parser.add_argument("files", metavar="Files", nargs='+', help="files to plot")
args = parser.parse_args()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colorbar
import sys
import os
from dim_labels import *
def cm2inch(value):
return value / 2.54
def round_to_divisable(value, divisable):
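"""Round the magnitude of `value` up to the next multiple of `divisable`
(after scaling by a power of ten so the two are comparable), preserving sign;
used below to pick a zmax that yields evenly spaced colorbar levels."""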
if value == 0:
return 0
temp = np.abs(value)
a = 0
while temp < divisable:
temp *= 10.0
a += 1
if temp % divisable == 0:
return value
res = old_div((temp - (temp % divisable) + divisable), (10.0**a))
if value < 0:
return -res
return res
def plot_measure2d(xs, ys, zs, xlabel, ylabel, zlabel="", zmin=None,
zmax=None, qualitative=False, contour=True, title=None):
fig = plt.figure(figsize=(cm2inch(5.5), cm2inch(5.5)))
ax1 = fig.add_axes([0.0, 0.25, 1.0, 0.85])
if title is not None:
plt.title(title)
ax2 = fig.add_axes([0.0, 0.0, 1.0, 0.05])
_, steps_x = np.unique(xs, return_counts=True)
_, steps_y = np.unique(ys, return_counts=True)
steps_x = np.max(steps_x)
steps_y = np.max(steps_y)
xs = xs.reshape((steps_y, steps_x))
ys = ys.reshape((steps_y, steps_x))
zs = zs.reshape((steps_y, steps_x))
zs = zs.transpose()
# Auto-scale
idcs = np.isfinite(zs)
if np.sum(idcs) == 0:
return
if zmin is None:
zmin = np.min(zs[idcs])
if 0 < zmin < 1:
zmin = 0
else:
zmin = int(zmin)
if zmax is None:
zmax = round_to_divisable(np.max(zs[idcs]), args.nl - 1)
if zmin > 0:
zmax = zmax + zmin
if 0 < zmax < 1:
zmax = 1
# Select the colormap
if qualitative:
cmap = plt.cm.rainbow
else:
#cmap = plt.cm.Purples
# if zmin < 0.0:
cmap = plt.cm.PuOr
cmap.set_bad('black', 1.)
extent = (np.min(xs), np.max(xs), np.min(ys), np.max(ys))
ax1.imshow(zs, aspect='auto', origin='lower', extent=extent, cmap=cmap,
vmin=zmin, vmax=zmax, interpolation="none")
levels = np.linspace(zmin, zmax, args.nl)
zs = zs.transpose()
if contour:
CS2 = ax1.contour(xs, ys, zs, levels, linewidths=0.25,
colors='k', vmin=zmin, vmax=zmax)
ax1.grid(color='black', linestyle=':', linewidth=0.25)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
cbar = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,
orientation='horizontal', ticks=levels,
norm=matplotlib.colors.Normalize(zmin, zmax))
cbar.set_label(zlabel)
return fig
if not os.path.exists("images"):
os.mkdir("images")
for target_file in args.files:
simulator = target_file.split('_')[-1].split('.csv')[0]
experiment = target_file.split('/')[-1].split(simulator)[0]
#import data
results = np.genfromtxt(target_file, delimiter=',', names=True)
keys = results.dtype.names
data = np.zeros((results.shape[0], len(keys)))
for i in range(0, len(results)):
data[i] = np.array(list(results[i]))
fig = plot_measure2d(data[:, 0], data[:, 1], data[:, args.z],
xlabel=get_label(keys[0]), ylabel=get_label(keys[1]),
zlabel=get_label(keys[args.z]), zmin=args.zmin,
zmax=args.zmax, qualitative=args.q, contour=args.c,
title=SIMULATOR_LABELS[simulator])
if target_file.split('/')[-2]:
if not os.path.exists("images/" + target_file.split('/')[-2]):
os.mkdir("images/" + target_file.split('/')[-2])
fig.savefig("images/" + target_file.split('/')[-2] + "/" +
experiment + simulator + ".pdf", format='pdf',
bbox_inches='tight')
else:
fig.savefig("images/" + experiment + simulator + ".pdf", format='pdf',
bbox_inches='tight')
|
gpl-3.0
|
rcomer/iris
|
docs/gallery_code/meteorology/plot_COP_1d.py
|
4
|
3953
|
"""
Global Average Annual Temperature Plot
======================================
Produces a time-series plot of North American temperature forecasts for 2
different emission scenarios. Constraining data to a limited spatial area also
features in this example.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1
scenarios, both of which were derived using the IMAGE Integrated Assessment
Model (Johns et al. 2011; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
doi:10.1007/s00382-011-1005-5.
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
doi:10.1029/2009EO210001.
.. seealso::
Further details on the aggregation functionality being used in this example
can be found in :ref:`cube-statistics`.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data into three Cubes, one for each set of NetCDF files.
e1 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))
a1b = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
# load in the global pre-industrial mean temperature, and limit the domain
# to the same North American region that e1 and a1b are at.
north_america = iris.Constraint(
longitude=lambda v: 225 <= v <= 315, latitude=lambda v: 15 <= v <= 60
)
pre_industrial = iris.load_cube(
iris.sample_data_path("pre-industrial.pp"), north_america
)
# Generate area-weights array. As e1 and a1b are on the same grid we can
# do this just once and re-use. This method requires bounds on lat/lon
# coords, so let's add some in sensible locations using the "guess_bounds"
# method.
e1.coord("latitude").guess_bounds()
e1.coord("longitude").guess_bounds()
e1_grid_areas = iris.analysis.cartography.area_weights(e1)
pre_industrial.coord("latitude").guess_bounds()
pre_industrial.coord("longitude").guess_bounds()
pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
# Perform the area-weighted mean for each of the datasets using the
# computed grid-box areas.
pre_industrial_mean = pre_industrial.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=pre_grid_areas
)
e1_mean = e1.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
)
a1b_mean = a1b.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
)
# Plot the datasets
qplt.plot(e1_mean, label="E1 scenario", lw=1.5, color="blue")
qplt.plot(a1b_mean, label="A1B-Image scenario", lw=1.5, color="red")
# Draw a horizontal line showing the pre-industrial mean
plt.axhline(
y=pre_industrial_mean.data,
color="gray",
linestyle="dashed",
label="pre-industrial",
lw=1.5,
)
# Constrain the period 1860-1999 and extract the observed data from a1b
constraint = iris.Constraint(
time=lambda cell: 1860 <= cell.point.year <= 1999
)
observed = a1b_mean.extract(constraint)
# Assert that this data set is the same as the e1 scenario:
# they share data up to the 1999 cut off.
assert np.all(np.isclose(observed.data, e1_mean.extract(constraint).data))
# Plot the observed data
qplt.plot(observed, label="observed", color="black", lw=1.5)
# Add a legend and title
plt.legend(loc="upper left")
plt.title("North American mean air temperature", fontsize=18)
plt.xlabel("Time / year")
plt.grid()
iplt.show()
if __name__ == "__main__":
main()
|
lgpl-3.0
|
stormsson/procedural_city_generation_wrapper
|
vendor/josauder/procedural_city_generation/roadmap/main.py
|
2
|
2015
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
gui=None
def main():
from procedural_city_generation.roadmap.config import config
from copy import copy
singleton=config()
front=copy(singleton.global_lists.vertex_list)
front.pop(0)
front.pop()
vertex_queue = copy(singleton.global_lists.vertex_queue)
from procedural_city_generation.roadmap.iteration import iteration
singleton.iterationszaehler=0
if singleton.plot == 1:
if gui is None:
import matplotlib.pyplot as plt
plt.close()
fig=plt.figure()
ax=plt.subplot(111)
fig.canvas.draw()
ax.set_xlim((-singleton.border[0], singleton.border[0]))
ax.set_ylim((-singleton.border[1], singleton.border[1]))
else:
gui.set_xlim((-singleton.border[0], singleton.border[0]))
gui.set_ylim((-singleton.border[1], singleton.border[1]))
i=0
while (front!=[] or singleton.global_lists.vertex_queue !=[]):
i+=1
front=iteration(front)
if singleton.plot == 1:
if i%singleton.plot_counter == 0:
if gui is None:
plt.pause(0.001)
try:
fig.canvas.blit(ax.bbox)
fig.canvas.flush_events()
except:
fig.canvas.draw()
else:
gui.update()
singleton.iterationszaehler=0
from procedural_city_generation.additional_stuff.pickletools import save_vertexlist
print("Roadmap is complete")
save_vertexlist(singleton.global_lists.vertex_list, singleton.output_name, singleton.savefig)
if gui is None and singleton.plot == 1:
plt.show()
return 0
if __name__ == '__main__':
import os, sys
parentpath=os.path.join(os.getcwd(), ("../../"))
sys.path.append(parentpath)
main()
|
mpl-2.0
|
nhejazi/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
159
|
2951
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
yavalvas/yav_com
|
build/matplotlib/doc/mpl_examples/pylab_examples/tricontour_demo.py
|
3
|
4605
|
"""
Contour plots of unstructured triangular grids.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
import math
# Creating a Triangulation without specifying the triangles results in the
# Delaunay triangulation of the points.
# First create the x and y coordinates of the points.
n_angles = 48
n_radii = 8
min_radius = 0.25
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
z = (np.cos(radii)*np.cos(angles*3.0)).flatten()
# Create the Triangulation; no triangles so Delaunay triangulation created.
triang = tri.Triangulation(x, y)
# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
# pcolor plot.
plt.figure()
plt.gca().set_aspect('equal')
plt.tricontourf(triang, z)
plt.colorbar()
plt.tricontour(triang, z, colors='k')
plt.title('Contour plot of Delaunay triangulation')
# You can specify your own triangulation rather than perform a Delaunay
# triangulation of the points, where each triangle is given by the indices of
# the three points that make up the triangle, ordered in either a clockwise or
# anticlockwise manner.
xy = np.asarray([
[-0.101, 0.872], [-0.080, 0.883], [-0.069, 0.888], [-0.054, 0.890],
[-0.045, 0.897], [-0.057, 0.895], [-0.073, 0.900], [-0.087, 0.898],
[-0.090, 0.904], [-0.069, 0.907], [-0.069, 0.921], [-0.080, 0.919],
[-0.073, 0.928], [-0.052, 0.930], [-0.048, 0.942], [-0.062, 0.949],
[-0.054, 0.958], [-0.069, 0.954], [-0.087, 0.952], [-0.087, 0.959],
[-0.080, 0.966], [-0.085, 0.973], [-0.087, 0.965], [-0.097, 0.965],
[-0.097, 0.975], [-0.092, 0.984], [-0.101, 0.980], [-0.108, 0.980],
[-0.104, 0.987], [-0.102, 0.993], [-0.115, 1.001], [-0.099, 0.996],
[-0.101, 1.007], [-0.090, 1.010], [-0.087, 1.021], [-0.069, 1.021],
[-0.052, 1.022], [-0.052, 1.017], [-0.069, 1.010], [-0.064, 1.005],
[-0.048, 1.005], [-0.031, 1.005], [-0.031, 0.996], [-0.040, 0.987],
[-0.045, 0.980], [-0.052, 0.975], [-0.040, 0.973], [-0.026, 0.968],
[-0.020, 0.954], [-0.006, 0.947], [ 0.003, 0.935], [ 0.006, 0.926],
[ 0.005, 0.921], [ 0.022, 0.923], [ 0.033, 0.912], [ 0.029, 0.905],
[ 0.017, 0.900], [ 0.012, 0.895], [ 0.027, 0.893], [ 0.019, 0.886],
[ 0.001, 0.883], [-0.012, 0.884], [-0.029, 0.883], [-0.038, 0.879],
[-0.057, 0.881], [-0.062, 0.876], [-0.078, 0.876], [-0.087, 0.872],
[-0.030, 0.907], [-0.007, 0.905], [-0.057, 0.916], [-0.025, 0.933],
[-0.077, 0.990], [-0.059, 0.993]])
x = xy[:, 0]*180/3.14159
y = xy[:, 1]*180/3.14159
x0 = -5
y0 = 52
z = np.exp(-0.01*((x-x0)*(x-x0) + (y-y0)*(y-y0)))
triangles = np.asarray([
[67, 66, 1], [65, 2, 66], [ 1, 66, 2], [64, 2, 65], [63, 3, 64],
[60, 59, 57], [ 2, 64, 3], [ 3, 63, 4], [ 0, 67, 1], [62, 4, 63],
[57, 59, 56], [59, 58, 56], [61, 60, 69], [57, 69, 60], [ 4, 62, 68],
[ 6, 5, 9], [61, 68, 62], [69, 68, 61], [ 9, 5, 70], [ 6, 8, 7],
[ 4, 70, 5], [ 8, 6, 9], [56, 69, 57], [69, 56, 52], [70, 10, 9],
[54, 53, 55], [56, 55, 53], [68, 70, 4], [52, 56, 53], [11, 10, 12],
[69, 71, 68], [68, 13, 70], [10, 70, 13], [51, 50, 52], [13, 68, 71],
[52, 71, 69], [12, 10, 13], [71, 52, 50], [71, 14, 13], [50, 49, 71],
[49, 48, 71], [14, 16, 15], [14, 71, 48], [17, 19, 18], [17, 20, 19],
[48, 16, 14], [48, 47, 16], [47, 46, 16], [16, 46, 45], [23, 22, 24],
[21, 24, 22], [17, 16, 45], [20, 17, 45], [21, 25, 24], [27, 26, 28],
[20, 72, 21], [25, 21, 72], [45, 72, 20], [25, 28, 26], [44, 73, 45],
[72, 45, 73], [28, 25, 29], [29, 25, 31], [43, 73, 44], [73, 43, 40],
[72, 73, 39], [72, 31, 25], [42, 40, 43], [31, 30, 29], [39, 73, 40],
[42, 41, 40], [72, 33, 31], [32, 31, 33], [39, 38, 72], [33, 72, 38],
[33, 38, 34], [37, 35, 38], [34, 38, 35], [35, 37, 36]])
# Rather than create a Triangulation object, you can simply pass x, y and triangles
# arrays to tricontourf directly. It would be better to use a Triangulation
# object if the same triangulation was to be used more than once to save
# duplicated calculations.
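# A sketch of the Triangulation-object alternative mentioned above (equivalent
# to the direct calls below, but reusable across several plots):
#
# triang2 = tri.Triangulation(x, y, triangles=triangles)
# plt.tricontourf(triang2, z)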
plt.figure()
plt.gca().set_aspect('equal')
plt.tricontourf(x, y, triangles, z)
plt.colorbar()
plt.title('Contour plot of user-specified triangulation')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
plt.show()
|
mit
|
musically-ut/statsmodels
|
statsmodels/stats/tests/test_statstools.py
|
23
|
10622
|
# TODO: Test robust skewness
# TODO: Test robust kurtosis
import numpy as np
import pandas as pd
from numpy.testing import (assert_almost_equal, assert_raises, TestCase)
from statsmodels.stats.stattools import (omni_normtest, jarque_bera,
durbin_watson, _medcouple_1d, medcouple,
robust_kurtosis, robust_skewness)
from statsmodels.stats._adnorm import normal_ad
#a random array, rounded to 4 decimals
x = np.array([-0.1184, -1.3403, 0.0063, -0.612, -0.3869, -0.2313, -2.8485,
-0.2167, 0.4153, 1.8492, -0.3706, 0.9726, -0.1501, -0.0337,
-1.4423, 1.2489, 0.9182, -0.2331, -0.6182, 0.183])
def test_durbin_watson():
#benchmark values from R car::durbinWatsonTest(x)
#library("car")
#> durbinWatsonTest(x)
#[1] 1.95298958377419
#> durbinWatsonTest(x**2)
#[1] 1.848802400319998
#> durbinWatsonTest(x[2:20]+0.5*x[1:19])
#[1] 1.09897993228779
#> durbinWatsonTest(x[2:20]+0.8*x[1:19])
#[1] 0.937241876707273
#> durbinWatsonTest(x[2:20]+0.9*x[1:19])
#[1] 0.921488912587806
st_R = 1.95298958377419
assert_almost_equal(durbin_watson(x), st_R, 14)
st_R = 1.848802400319998
assert_almost_equal(durbin_watson(x**2), st_R, 14)
st_R = 1.09897993228779
assert_almost_equal(durbin_watson(x[1:] + 0.5 * x[:-1]), st_R, 14)
st_R = 0.937241876707273
assert_almost_equal(durbin_watson(x[1:] + 0.8 * x[:-1]), st_R, 14)
st_R = 0.921488912587806
assert_almost_equal(durbin_watson(x[1:] + 0.9 * x[:-1]), st_R, 14)
def test_omni_normtest():
#tests against R fBasics
from scipy import stats
st_pv_R = np.array(
[[3.994138321207883, -1.129304302161460, 1.648881473704978],
[0.1357325110375005, 0.2587694866795507, 0.0991719192710234]])
nt = omni_normtest(x)
assert_almost_equal(nt, st_pv_R[:, 0], 14)
st = stats.skewtest(x)
assert_almost_equal(st, st_pv_R[:, 1], 14)
kt = stats.kurtosistest(x)
assert_almost_equal(kt, st_pv_R[:, 2], 11)
st_pv_R = np.array(
[[34.523210399523926, 4.429509162503833, 3.860396220444025],
[3.186985686465249e-08, 9.444780064482572e-06, 1.132033129378485e-04]])
x2 = x**2
#TODO: fix precision in these test with relative tolerance
nt = omni_normtest(x2)
assert_almost_equal(nt, st_pv_R[:, 0], 12)
st = stats.skewtest(x2)
assert_almost_equal(st, st_pv_R[:, 1], 12)
kt = stats.kurtosistest(x2)
assert_almost_equal(kt, st_pv_R[:, 2], 12)
def test_omni_normtest_axis():
#test axis of omni_normtest
x = np.random.randn(25, 3)
nt1 = omni_normtest(x)
nt2 = omni_normtest(x, axis=0)
nt3 = omni_normtest(x.T, axis=1)
assert_almost_equal(nt2, nt1, decimal=13)
assert_almost_equal(nt3, nt1, decimal=13)
def test_jarque_bera():
#tests against R fBasics
st_pv_R = np.array([1.9662677226861689, 0.3741367669648314])
jb = jarque_bera(x)[:2]
assert_almost_equal(jb, st_pv_R, 14)
st_pv_R = np.array([78.329987305556, 0.000000000000])
jb = jarque_bera(x**2)[:2]
assert_almost_equal(jb, st_pv_R, 13)
st_pv_R = np.array([5.7135750796706670, 0.0574530296971343])
jb = jarque_bera(np.log(x**2))[:2]
assert_almost_equal(jb, st_pv_R, 14)
st_pv_R = np.array([2.6489315748495761, 0.2659449923067881])
jb = jarque_bera(np.exp(-x**2))[:2]
assert_almost_equal(jb, st_pv_R, 14)
def test_shapiro():
#tests against R fBasics
#testing scipy.stats
from scipy.stats import shapiro
st_pv_R = np.array([0.939984787255526, 0.239621898000460])
sh = shapiro(x)
assert_almost_equal(sh, st_pv_R, 4)
    # statistic diff is ok (-7.15e-06); p-value diff is -3.05e-10
st_pv_R = np.array([5.799574255943298e-01, 1.838456834681376e-06 * 1e4])
sh = shapiro(x**2) * np.array([1, 1e4])
assert_almost_equal(sh, st_pv_R, 5)
st_pv_R = np.array([0.91730442643165588, 0.08793704167882448])
sh = shapiro(np.log(x**2))
assert_almost_equal(sh, st_pv_R, 5)
#diff is [ 9.38773155e-07, 5.48221246e-08]
st_pv_R = np.array([0.818361863493919373, 0.001644620895206969])
sh = shapiro(np.exp(-x**2))
assert_almost_equal(sh, st_pv_R, 5)
def test_adnorm():
#tests against R fBasics
st_pv = []
st_pv_R = np.array([0.5867235358882148, 0.1115380760041617])
ad = normal_ad(x)
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
st_pv_R = np.array([2.976266267594575e+00, 8.753003709960645e-08])
ad = normal_ad(x**2)
assert_almost_equal(ad, st_pv_R, 11)
st_pv.append(st_pv_R)
st_pv_R = np.array([0.4892557856308528, 0.1968040759316307])
ad = normal_ad(np.log(x**2))
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
st_pv_R = np.array([1.4599014654282669312, 0.0006380009232897535])
ad = normal_ad(np.exp(-x**2))
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
ad = normal_ad(np.column_stack((x, x**2, np.log(x**2), np.exp(-x**2))).T,
axis=1)
assert_almost_equal(ad, np.column_stack(st_pv), 11)
def test_durbin_watson_pandas():
x = np.random.randn(50)
x_series = pd.Series(x)
assert_almost_equal(durbin_watson(x), durbin_watson(x_series), decimal=13)
class TestStattools(TestCase):
@classmethod
def setup_class(cls):
x = np.random.standard_normal(1000)
e1, e2, e3, e4, e5, e6, e7 = np.percentile(x, (12.5, 25.0, 37.5, 50.0, 62.5, 75.0, 87.5))
c05, c50, c95 = np.percentile(x, (5.0, 50.0, 95.0))
f025, f25, f75, f975 = np.percentile(x, (2.5, 25.0, 75.0, 97.5))
mean = np.mean
kr1 = mean(((x - mean(x)) / np.std(x))**4.0) - 3.0
kr2 = ((e7 - e5) + (e3 - e1)) / (e6 - e2) - 1.2330951154852172
kr3 = (mean(x[x > c95]) - mean(x[x < c05])) / (mean(x[x > c50]) - mean(x[x < c50])) - 2.5852271228708048
kr4 = (f975 - f025) / (f75 - f25) - 2.9058469516701639
cls.kurtosis_x = x
cls.expected_kurtosis = np.array([kr1, kr2, kr3, kr4])
cls.kurtosis_constants = np.array([3.0,1.2330951154852172,2.5852271228708048,2.9058469516701639])
def test_medcouple_no_axis(self):
x = np.reshape(np.arange(100.0), (50, 2))
mc = medcouple(x, axis=None)
assert_almost_equal(mc, medcouple(x.ravel()))
def test_medcouple_1d(self):
x = np.reshape(np.arange(100.0),(50,2))
assert_raises(ValueError, _medcouple_1d, x)
def test_medcouple_symmetric(self):
mc = medcouple(np.arange(5.0))
assert_almost_equal(mc, 0)
def test_medcouple_nonzero(self):
mc = medcouple(np.array([1, 2, 7, 9, 10.0]))
assert_almost_equal(mc, -0.3333333)
def test_medcouple_symmetry(self):
x = np.random.standard_normal(100)
mcp = medcouple(x)
mcn = medcouple(-x)
assert_almost_equal(mcp + mcn, 0)
def test_durbin_watson(self):
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
assert_almost_equal(dw, durbin_watson(x))
def test_durbin_watson_2d(self):
shape = (1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[:, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x))
def test_durbin_watson_3d(self):
shape = (10, 1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[None, :, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x, axis=1))
def test_robust_skewness_1d(self):
x = np.arange(21.0)
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_1d_2d(self):
x = np.random.randn(21)
y = x[:, None]
sk_x = robust_skewness(x)
sk_y = robust_skewness(y, axis=None)
assert_almost_equal(np.array(sk_x), np.array(sk_y))
def test_robust_skewness_symmetric(self):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_3d(self):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
x = np.tile(x, (10, 10, 1))
sk_3d = robust_skewness(x, axis=2)
result = np.zeros((10, 10))
for sk in sk_3d:
assert_almost_equal(sk, result)
def test_robust_kurtosis_1d_2d(self):
x = np.random.randn(100)
y = x[:, None]
kr_x = np.array(robust_kurtosis(x))
kr_y = np.array(robust_kurtosis(y, axis=None))
assert_almost_equal(kr_x, kr_y)
def test_robust_kurtosis(self):
x = self.kurtosis_x
assert_almost_equal(np.array(robust_kurtosis(x)), self.expected_kurtosis)
def test_robust_kurtosis_3d(self):
x = np.tile(self.kurtosis_x, (10, 10, 1))
kurtosis = np.array(robust_kurtosis(x, axis=2))
for i, r in enumerate(self.expected_kurtosis):
assert_almost_equal(r * np.ones((10, 10)), kurtosis[i])
def test_robust_kurtosis_excess_false(self):
x = self.kurtosis_x
expected = self.expected_kurtosis + self.kurtosis_constants
kurtosis = np.array(robust_kurtosis(x, excess=False))
assert_almost_equal(expected, kurtosis)
def test_robust_kurtosis_ab(self):
"""Test custom alpha, beta in kr3"""
x = self.kurtosis_x
alpha, beta = (10.0, 45.0)
kurtosis = robust_kurtosis(self.kurtosis_x, ab=(alpha,beta), excess=False)
num = np.mean(x[x>np.percentile(x,100.0 - alpha)]) - np.mean(x[x<np.percentile(x,alpha)])
denom = np.mean(x[x>np.percentile(x,100.0 - beta)]) - np.mean(x[x<np.percentile(x,beta)])
assert_almost_equal(kurtosis[2], num/denom)
def test_robust_kurtosis_dg(self):
"""Test custom delta, gamma in kr4"""
x = self.kurtosis_x
delta, gamma = (10.0, 45.0)
kurtosis = robust_kurtosis(self.kurtosis_x, dg=(delta,gamma), excess=False)
q = np.percentile(x,[delta, 100.0-delta, gamma, 100.0-gamma])
assert_almost_equal(kurtosis[3], (q[1] - q[0]) / (q[3] - q[2]))
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False) #, '--pdb'
# run_module_suite()
#nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
# exit=False)
|
bsd-3-clause
|
IntelLabs/hpat
|
examples/series/str/series_str_center.py
|
1
|
1809
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_str_center():
series = pd.Series(['dog', 'foo', 'bar']) # Series of 'dog', 'foo', 'bar'
out_series = series.str.center(5, '*')
return out_series # Expect series of '*dog*', '*foo*', '*bar*'
print(series_str_center())
|
bsd-2-clause
|
xyguo/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
341
|
2620
|
"""
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
|
bsd-3-clause
|
usantamaria/mat281
|
clases/Unidad3-ModelamientoyError/Clase01-TrainingTestError/mat281_code/data_analysis.py
|
2
|
1944
|
import numpy as np
from matplotlib import pyplot as plt
# Define error function
def error(vector_e):
return abs(vector_e).mean()
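# Illustrative note (not part of the original script): error() is simply the
# mean absolute error of a residual vector, e.g.
#     error(np.array([1.0, -2.0, 3.0]))   # -> 2.0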
# Load the data
N = 200
data = np.loadtxt("dataN%d.txt"%N)
sorted = True
s = "sorted" if sorted else ""
nmax = 71
# Some properties
color_t = "b"
color_p = "g"
# Sort or keep it unsorted
if sorted:
data = data[np.argsort(data[:,0])]
# Split into training and prediction data
t = int(N*.7)
x_t = data[:t,0]
x_p = data[t:,0]
y_t = data[:t,1]
y_p = data[t:,1]
# Some helper variables for nicer plotting
x = np.linspace(data[:,0].min(), data[:,0].max(), 1000)
# Fit best several models and record training error and prediction error
n_range = range(1, nmax)
error_t = []
error_p = []
for n in n_range:
fit_n = np.polyfit(x_t, y_t, n) # Obtains the best fitted polynomial of degree n
pol_n = np.poly1d(fit_n) # Creates the polynomial with coefficients as in fit n
plt.plot(x_t, y_t, 's'+color_t, alpha=0.5, label="Datos de Entrenamiento de Modelo")
if t<N:
plt.plot(x_p, y_p, 'o'+color_p, alpha=0.5, label="Datos para Testeo de Modelo")
plt.plot(x, 5*np.cos(.25*np.pi*x), 'k-', lw=2.0, label="Relacion determinista")
plt.plot(x, pol_n(x), 'r-', lw=2.0, label="Polinomio de grado %d"%n)
plt.xlim([-2.5,2.5])
plt.ylim([-5,10])
plt.legend(numpoints = 1, loc="lower center")
plt.savefig("images/data%sN%dpol%02d.png"%(s,N,n))
plt.close()
error_t.append( error(y_t - pol_n(x_t)) )
error_p.append( error(y_p - pol_n(x_p)) )
# Plot the errors
plt.loglog(n_range, error_t, "-s"+color_t, lw=2.0, label="Training error")
if t<N:
plt.loglog(n_range, error_p, "-o"+color_p, lw=2.0, label="Prediction error")
plt.legend(numpoints= 1)
plt.xlabel("Grado del polinomio")
plt.ylabel("Error")
plt.savefig("images/data%s_trainpred.png"%s)
plt.close()
# Save the error
data = np.array([ np.array(error_t), np.array(error_p)]).T
#print data
np.savetxt("images/data%serror_trainpred.txt"%s, data)
|
cc0-1.0
|
mattvonrocketstein/smash
|
smashlib/ipy3x/core/completer.py
|
1
|
42847
|
# encoding: utf-8
"""Word completion for IPython.
This module is a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3, but we need a lot more
functionality specific to IPython, so this module will continue to live as an
IPython-specific utility.
Original rlcompleter documentation:
This requires the latest extension to the readline module. The completer
completes keywords, built-ins and globals in __main__; when completing
NAME.NAME..., it evaluates (!) the expression up to the last dot and
completes its attributes.
It's very cool to do "import string" type "string.", hit the
completion key (twice), and see the list of names defined by the
string module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
``__getattr__`` hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org
import __main__
import glob
import inspect
import itertools
import keyword
import os
import re
import sys
from IPython.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols
from IPython.utils import generics
from IPython.utils import io
from IPython.utils.decorators import undoc
from IPython.utils.dir2 import dir2
from IPython.utils.process import arg_split
from IPython.utils.py3compat import builtin_mod, string_types, PY3
from IPython.utils.traitlets import CBool, Enum
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Public API
__all__ = ['Completer', 'IPCompleter']
if sys.platform == 'win32':
PROTECTABLES = ' '
else:
PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
#-----------------------------------------------------------------------------
# Main functions and classes
#-----------------------------------------------------------------------------
def has_open_quotes(s):
"""Return whether a string has open quotes.
This simply counts whether the number of quote characters of either type in
the string is odd.
Returns
-------
If there is an open quote, the quote character is returned. Else, return
False.
"""
# We check " first, then ', so complex cases with nested quotes will get
# the " to take precedence.
if s.count('"') % 2:
return '"'
elif s.count("'") % 2:
return "'"
else:
return False
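# Illustrative sketch (not part of the original module), assuming the
# behaviour documented above:
#     has_open_quotes('print("foo')     # -> '"'   (one unmatched double quote)
#     has_open_quotes("a = 'b' + 'c'")  # -> False (all quotes are balanced)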
def protect_filename(s):
"""Escape a string to protect certain characters."""
return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
for ch in s])
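# Illustrative sketch (not part of the original module): on POSIX platforms a
# space is in PROTECTABLES, so it gets a backslash prefix:
#     protect_filename('my file.py')   # -> 'my\\ file.py'  (i.e. my\ file.py)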
def expand_user(path):
"""Expand '~'-style usernames in strings.
This is similar to :func:`os.path.expanduser`, but it computes and returns
extra information that will be useful if the input was being used in
computing completions, and you wish to return the completions with the
original '~' instead of its expanded value.
Parameters
----------
path : str
String to be expanded. If no ~ is present, the output is the same as the
input.
Returns
-------
newpath : str
Result of ~ expansion in the input path.
tilde_expand : bool
Whether any expansion was performed or not.
tilde_val : str
The value that ~ was replaced with.
"""
# Default values
tilde_expand = False
tilde_val = ''
newpath = path
if path.startswith('~'):
tilde_expand = True
rest = len(path) - 1
newpath = os.path.expanduser(path)
if rest:
tilde_val = newpath[:-rest]
else:
tilde_val = newpath
return newpath, tilde_expand, tilde_val
def compress_user(path, tilde_expand, tilde_val):
"""Does the opposite of expand_user, with its outputs.
"""
if tilde_expand:
return path.replace(tilde_val, '~')
else:
return path
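# Illustrative sketch (not part of the original module); the home directory is
# hypothetical. On a system where '~' expands to '/home/me':
#     expand_user('~/notebooks')
#     # -> ('/home/me/notebooks', True, '/home/me')
#     compress_user('/home/me/notebooks', True, '/home/me')
#     # -> '~/notebooks'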
def penalize_magics_key(word):
"""key for sorting that penalizes magic commands in the ordering
Normal words are left alone.
Magic commands have the initial % moved to the end, e.g.
%matplotlib is transformed as follows:
%matplotlib -> matplotlib%
[The choice of the final % is arbitrary.]
Since "matplotlib" < "matplotlib%" as strings,
"timeit" will appear before the magic "%timeit" in the ordering
For consistency, move "%%" to the end, so cell magics appear *after*
line magics with the same name.
A check is performed that there are no other "%" in the string;
if there are, then the string is not a magic command and is left unchanged.
"""
# Move any % signs from start to end of the key
# provided there are no others elsewhere in the string
if word[:2] == "%%":
if not "%" in word[2:]:
return word[2:] + "%%"
if word[:1] == "%":
if not "%" in word[1:]:
return word[1:] + "%"
return word
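# Illustrative sketch (not part of the original module): used as a sort key,
# plain names come before line magics, which come before cell magics of the
# same name:
#     sorted(['%%timeit', 'timeit', '%timeit'], key=penalize_magics_key)
#     # -> ['timeit', '%timeit', '%%timeit']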
#@undoc
#class Bunch(object):
# pass
DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
GREEDY_DELIMS = ' =\r\n'
class CompletionSplitter(object):
"""An object to split an input line in a manner similar to readline.
By having our own implementation, we can expose readline-like completion in
a uniform manner to all frontends. This object only needs to be given the
line of text to be split and the cursor position on said line, and it
returns the 'word' to be completed on at the cursor after splitting the
entire line.
What characters are used as splitting delimiters can be controlled by
setting the `delims` attribute (this is a property that internally
automatically builds the necessary regular expression)"""
# Private interface
# A string of delimiter characters. The default value makes sense for
# IPython's most typical usage patterns.
_delims = DELIMS
# The expression (a normal string) to be compiled into a regular expression
# for actual splitting. We store it as an attribute mostly for ease of
# debugging, since this type of code can be so tricky to debug.
_delim_expr = None
# The regular expression that does the actual splitting
_delim_re = None
def __init__(self, delims=None):
delims = CompletionSplitter._delims if delims is None else delims
self.delims = delims
@property
def delims(self):
"""Return the string of delimiter characters."""
return self._delims
@delims.setter
def delims(self, delims):
"""Set the delimiters for line splitting."""
expr = '[' + ''.join('\\' + c for c in delims) + ']'
self._delim_re = re.compile(expr)
self._delims = delims
self._delim_expr = expr
def split_line(self, line, cursor_pos=None):
"""Split a line of text with a cursor at the given position.
"""
l = line if cursor_pos is None else line[:cursor_pos]
return self._delim_re.split(l)[-1]
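# Illustrative sketch (not part of the original module): with the default
# delimiters only the trailing "word" at the cursor is returned; note that '.'
# is deliberately not a delimiter, so dotted names stay in one piece:
#     CompletionSplitter().split_line('plot(data.val')   # -> 'data.val'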
class Completer(Configurable):
greedy = CBool(False, config=True,
help="""Activate greedy completion
This will enable completion on elements of lists, results of function calls, etc.,
but can be unsafe because the code is actually evaluated on TAB.
"""
)
def __init__(self, namespace=None, global_namespace=None, **kwargs):
"""Create a new completer for the command line.
Completer(namespace=ns,global_namespace=ns2) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
super(Completer, self).__init__(**kwargs)
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
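    # Illustrative sketch (not part of the original class): the readline-style
    # protocol asks for one match per state until None is returned. Assuming a
    # namespace {'foobar': 1, 'foobaz': 2} and no other matching names:
    #     c = Completer(namespace={'foobar': 1, 'foobaz': 2})
    #     c.complete('fooba', 0)   # -> 'foobar' or 'foobaz'
    #     c.complete('fooba', 1)   # -> the other of the two
    #     c.complete('fooba', 2)   # -> None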
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
# print 'Completer->global_matches, txt=%r' % text # dbg
matches = []
match_append = matches.append
n = len(text)
for lst in [keyword.kwlist,
builtin_mod.__dict__.keys(),
self.namespace.keys(),
self.global_namespace.keys()]:
for word in lst:
if word[:n] == text and word != "__builtins__":
match_append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
        possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
# io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
if m:
expr, attr = m.group(1, 3)
elif self.greedy:
m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
if not m2:
return []
expr, attr = m2.group(1, 2)
else:
return []
try:
obj = eval(expr, self.namespace)
except:
try:
obj = eval(expr, self.global_namespace)
except:
return []
if self.limit_to__all__ and hasattr(obj, '__all__'):
words = get__all__entries(obj)
else:
words = dir2(obj)
try:
words = generics.complete_object(obj, words)
except TryNext:
pass
except Exception:
# Silence errors from completion function
# raise # dbg
pass
# Build match list to return
n = len(attr)
res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr]
return res
def get__all__entries(obj):
"""returns the strings in the __all__ attribute"""
try:
words = getattr(obj, '__all__')
except:
return []
return [w for w in words if isinstance(w, string_types)]
def match_dict_keys(keys, prefix):
"""Used by dict_key_matches, matching the prefix to a list of keys"""
if not prefix:
return None, 0, [repr(k) for k in keys
if isinstance(k, (string_types, bytes))]
quote_match = re.search('["\']', prefix)
quote = quote_match.group()
try:
prefix_str = eval(prefix + quote, {})
except Exception:
return None, 0, []
token_match = re.search(r'\w*$', prefix, re.UNICODE)
token_start = token_match.start()
token_prefix = token_match.group()
# TODO: support bytes in Py3k
matched = []
for key in keys:
try:
if not key.startswith(prefix_str):
continue
except (AttributeError, TypeError, UnicodeError):
# Python 3+ TypeError on b'a'.startswith('a') or vice-versa
continue
# reformat remainder of key to begin with prefix
rem = key[len(prefix_str):]
# force repr wrapped in '
rem_repr = repr(rem + '"')
if rem_repr.startswith('u') and prefix[0] not in 'uU':
# Found key is unicode, but prefix is Py2 string.
# Therefore attempt to interpret key as string.
try:
rem_repr = repr(rem.encode('ascii') + '"')
except UnicodeEncodeError:
continue
rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
if quote == '"':
# The entered prefix is quoted with ",
# but the match is quoted with '.
# A contained " hence needs escaping for comparison:
rem_repr = rem_repr.replace('"', '\\"')
# then reinsert prefix from start of token
matched.append('%s%s' % (token_prefix, rem_repr))
return quote, token_start, matched
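# Illustrative sketch (not part of the original module): with an empty prefix
# every string key is offered as a quoted repr; with a quoted prefix only the
# matching keys are returned, together with the opening quote and the offset
# of the typed token:
#     match_dict_keys(['abc', 'abd', 'x'], '')
#     # -> (None, 0, ["'abc'", "'abd'", "'x'"])
#     match_dict_keys(['abc', 'abd', 'x'], "'ab")
#     # -> ("'", 1, ['abc', 'abd'])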
def _safe_isinstance(obj, module, class_name):
"""Checks if obj is an instance of module.class_name if loaded
"""
return (module in sys.modules and
isinstance(obj, getattr(__import__(module), class_name)))
class IPCompleter(Completer):
"""Extension of the completer class with IPython-specific features"""
def _greedy_changed(self, name, old, new):
"""update the splitter and readline delims when greedy is changed"""
if new:
self.splitter.delims = GREEDY_DELIMS
else:
self.splitter.delims = DELIMS
if self.readline:
self.readline.set_completer_delims(self.splitter.delims)
merge_completions = CBool(True, config=True,
help="""Whether to merge completion results into a single list
If False, only the completion results from the first non-empty
completer will be returned.
"""
)
omit__names = Enum((0, 1, 2), default_value=2, config=True,
help="""Instruct the completer to omit private method names
Specifically, when completing on ``object.<tab>``.
When 2 [default]: all names that start with '_' will be excluded.
When 1: all 'magic' names (``__foo__``) will be excluded.
When 0: nothing will be excluded.
"""
)
limit_to__all__ = CBool(default_value=False, config=True,
help="""Instruct the completer to use __all__ for the completion
Specifically, when completing on ``object.<tab>``.
When True: only those names in obj.__all__ will be included.
When False [default]: the __all__ attribute is ignored
"""
)
def __init__(self, shell=None, namespace=None, global_namespace=None,
use_readline=True, config=None, **kwargs):
"""IPCompleter() -> completer
Return a completer object suitable for use by the readline library
via readline.set_completer().
Inputs:
- shell: a pointer to the ipython shell itself. This is needed
because this completer knows about magic functions, and those can
only be accessed via the ipython instance.
- namespace: an optional dict where completions are performed.
- global_namespace: secondary optional dict for completions, to
handle cases (such as IPython embedded inside functions) where
both Python scopes are visible.
use_readline : bool, optional
If true, use the readline library. This completer can still function
without readline, though in that case callers must provide some extra
information on each call about the current line."""
self.magic_escape = ESC_MAGIC
self.splitter = CompletionSplitter()
# Readline configuration, only used by the rlcompleter method.
if use_readline:
# We store the right version of readline so that later code
import IPython.utils.rlineimpl as readline
self.readline = readline
else:
self.readline = None
# _greedy_changed() depends on splitter and readline being defined:
Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
config=config, **kwargs)
# List where completion matches will be stored
self.matches = []
self.shell = shell
# Regexp to split filenames with spaces in them
self.space_name_re = re.compile(r'([^\\] )')
# Hold a local ref. to glob.glob for speed
self.glob = glob.glob
# Determine if we are running on 'dumb' terminals, like (X)Emacs
# buffers, to avoid completion problems.
term = os.environ.get('TERM', 'xterm')
self.dumb_terminal = term in ['dumb', 'emacs']
# Special handling of backslashes needed in win32 platforms
if sys.platform == "win32":
self.clean_glob = self._clean_glob_win32
else:
self.clean_glob = self._clean_glob
# regexp to parse docstring for function signature
self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# use this if positional argument name is also needed
#= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
# All active matcher routines for completion
self.matchers = [self.python_matches,
self.file_matches,
self.magic_matches,
self.python_func_kw_matches,
self.dict_key_matches,
]
def all_completions(self, text):
"""
Wrapper around the complete method for the benefit of emacs
and pydb.
"""
return self.complete(text)[1]
def _clean_glob(self, text):
return self.glob("%s*" % text)
def _clean_glob_win32(self, text):
return [f.replace("\\", "/")
for f in self.glob("%s*" % text)]
def file_matches(self, text):
"""Match filenames, expanding ~USER type strings.
Most of the seemingly convoluted logic in this completer is an
attempt to handle filenames with spaces in them. And yet it's not
quite perfect, because Python's readline doesn't expose all of the
GNU readline details needed for this to be done correctly.
For a filename with a space in it, the printed completions will be
only the parts after what's already been typed (instead of the
        full completions, as is normally done). I don't think it's possible to
        do better with the current (as of Python 2.3) Python readline."""
# io.rprint('Completer->file_matches: <%r>' % text) # dbg
# chars that require escaping with backslash - i.e. chars
# that readline treats incorrectly as delimiters, but we
# don't want to treat as delimiters in filename matching
# when escaped with backslash
if text.startswith('!'):
text = text[1:]
text_prefix = '!'
else:
text_prefix = ''
text_until_cursor = self.text_until_cursor
# track strings with open quotes
open_quotes = has_open_quotes(text_until_cursor)
if '(' in text_until_cursor or '[' in text_until_cursor:
lsplit = text
else:
try:
# arg_split ~ shlex.split, but with unicode bugs fixed by us
lsplit = arg_split(text_until_cursor)[-1]
except ValueError:
# typically an unmatched ", or backslash without escaped char.
if open_quotes:
lsplit = text_until_cursor.split(open_quotes)[-1]
else:
return []
except IndexError:
# tab pressed on empty line
lsplit = ""
if not open_quotes and lsplit != protect_filename(lsplit):
# if protectables are found, do matching on the whole escaped name
has_protectables = True
text0, text = text, lsplit
else:
has_protectables = False
text = os.path.expanduser(text)
if text == "":
return [text_prefix + protect_filename(f) for f in self.glob("*")]
# Compute the matches from the filesystem
m0 = self.clean_glob(text.replace('\\', ''))
if has_protectables:
# If we had protectables, we need to revert our changes to the
# beginning of filename so that we don't double-write the part
# of the filename we have so far
len_lsplit = len(lsplit)
matches = [text_prefix + text0 +
protect_filename(f[len_lsplit:]) for f in m0]
else:
if open_quotes:
# if we have a string with an open quote, we don't need to
# protect the names at all (and we _shouldn't_, as it
# would cause bugs when the filesystem call is made).
matches = m0
else:
matches = [text_prefix +
protect_filename(f) for f in m0]
# io.rprint('mm', matches) # dbg
# Mark directories in input list by appending '/' to their names.
matches = [x + '/' if os.path.isdir(x) else x for x in matches]
return matches
def magic_matches(self, text):
"""Match magics"""
# print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
# Get all shell magics now rather than statically, so magics loaded at
# runtime show up too.
lsm = self.shell.magics_manager.lsmagic()
line_magics = lsm['line']
cell_magics = lsm['cell']
pre = self.magic_escape
pre2 = pre + pre
# Completion logic:
# - user gives %%: only do cell magics
# - user gives %: do both line and cell magics
# - no prefix: do both
# In other words, line magics are skipped if the user gives %%
# explicitly
bare_text = text.lstrip(pre)
comp = [pre2 + m for m in cell_magics if m.startswith(bare_text)]
if not text.startswith(pre2):
comp += [pre + m for m in line_magics if m.startswith(bare_text)]
return comp
def python_matches(self, text):
"""Match attributes or global python names"""
# io.rprint('Completer->python_matches, txt=%r' % text) # dbg
if "." in text:
try:
matches = self.attr_matches(text)
if text.endswith('.') and self.omit__names:
if self.omit__names == 1:
# true if txt is _not_ a __ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\.__.*?__', txt) is None)
else:
# true if txt is _not_ a _ name, false otherwise:
no__name = (lambda txt:
re.match(r'\._.*?', txt[txt.rindex('.'):]) is None)
matches = filter(no__name, matches)
except NameError:
# catches <undefined attributes>.<tab>
matches = []
else:
matches = self.global_matches(text)
return matches
def _default_arguments_from_docstring(self, doc):
"""Parse the first line of docstring for call signature.
Docstring should be of the form 'min(iterable[, key=func])\n'.
It can also parse cython docstring of the form
'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
"""
if doc is None:
return []
        # we only care about the first line
line = doc.lstrip().splitlines()[0]
#p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
#'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = self.docstring_sig_re.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
ret += self.docstring_kwd_re.findall(s)
return ret
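    # Illustrative sketch (not part of the original class): parsing the example
    # signature from the docstring yields only the keyword-argument names
    # (here `completer` stands for any IPCompleter instance):
    #     completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
    #     # -> ['key']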
def _default_arguments(self, obj):
"""Return the list of default arguments of obj if it is callable,
or empty list otherwise."""
call_obj = obj
ret = []
if inspect.isbuiltin(obj):
pass
elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
if inspect.isclass(obj):
                # for cython embedsignature=True the constructor docstring
# belongs to the object itself not __init__
ret += self._default_arguments_from_docstring(
getattr(obj, '__doc__', ''))
# for classes, check for __init__,__new__
call_obj = (getattr(obj, '__init__', None) or
getattr(obj, '__new__', None))
# for all others, check if they are __call__able
elif hasattr(obj, '__call__'):
call_obj = obj.__call__
ret += self._default_arguments_from_docstring(
getattr(call_obj, '__doc__', ''))
try:
args, _, _1, defaults = inspect.getargspec(call_obj)
if defaults:
ret += args[-len(defaults):]
except TypeError:
pass
return list(set(ret))
def python_func_kw_matches(self, text):
"""Match named parameters (kwargs) of the last open function"""
if "." in text: # a parameter cannot be dotted
return []
try:
regexp = self.__funcParamsRegex
except AttributeError:
regexp = self.__funcParamsRegex = re.compile(r'''
'.*?(?<!\\)' | # single quoted strings or
".*?(?<!\\)" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
# 1. find the nearest identifier that comes before an unclosed
# parenthesis before the cursor
# e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
tokens = regexp.findall(self.text_until_cursor)
tokens.reverse()
iterTokens = iter(tokens)
openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
ids.append(next(iterTokens))
if not isId(ids[-1]):
ids.pop()
break
if not next(iterTokens) == '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = self.global_matches(ids[0])
else:
callableMatches = self.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
try:
namedArgs = self._default_arguments(eval(callableMatch,
self.namespace))
except:
continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append("%s=" % namedArg)
return argMatches
def dict_key_matches(self, text):
"Match string keys in a dictionary, after e.g. 'foo[' "
def get_keys(obj):
# Only allow completion for known in-memory dict-like types
if isinstance(obj, dict) or\
_safe_isinstance(obj, 'pandas', 'DataFrame'):
try:
return list(obj.keys())
except Exception:
return []
elif _safe_isinstance(obj, 'numpy', 'ndarray'):
return obj.dtype.names or []
return []
try:
regexps = self.__dict_key_regexps
except AttributeError:
dict_key_re_fmt = r'''(?x)
( # match dict-referring expression wrt greedy setting
%s
)
\[ # open bracket
\s* # and optional whitespace
([uUbB]? # string prefix (r not handled)
(?: # unclosed string
'(?:[^']|(?<!\\)\\')*
|
"(?:[^"]|(?<!\\)\\")*
)
)?
$
'''
regexps = self.__dict_key_regexps = {
False: re.compile(dict_key_re_fmt % '''
# identifiers separated by .
(?!\d)\w+
(?:\.(?!\d)\w+)*
'''),
True: re.compile(dict_key_re_fmt % '''
.+
''')
}
match = regexps[self.greedy].search(self.text_until_cursor)
if match is None:
return []
expr, prefix = match.groups()
try:
obj = eval(expr, self.namespace)
except Exception:
try:
obj = eval(expr, self.global_namespace)
except Exception:
return []
keys = get_keys(obj)
if not keys:
return keys
closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
if not matches:
return matches
# get the cursor position of
# - the text being completed
# - the start of the key text
# - the start of the completion
text_start = len(self.text_until_cursor) - len(text)
if prefix:
key_start = match.start(2)
completion_start = key_start + token_offset
else:
key_start = completion_start = match.end()
# grab the leading prefix, to make sure all completions start with
# `text`
if text_start > key_start:
leading = ''
else:
leading = text[text_start:completion_start]
# the index of the `[` character
bracket_idx = match.end(1)
# append closing quote and bracket as appropriate
# this is *not* appropriate if the opening quote or bracket is outside
# the text given to this method
suf = ''
continuation = self.line_buffer[len(self.text_until_cursor):]
if key_start > text_start and closing_quote:
# quotes were opened inside text, maybe close them
if continuation.startswith(closing_quote):
continuation = continuation[len(closing_quote):]
else:
suf += closing_quote
if bracket_idx > text_start:
# brackets were opened inside text, maybe close them
if not continuation.startswith(']'):
suf += ']'
return [leading + k + suf for k in matches]
def latex_matches(self, text):
u"""Match Latex syntax for unicode characters.
        This does both \\alp -> \\alpha and \\alpha -> α
Used on Python 3 only.
"""
slashpos = text.rfind('\\')
if slashpos > -1:
s = text[slashpos:]
if s in latex_symbols:
# Try to complete a full latex symbol to unicode
                # \\alpha -> α
return s, [latex_symbols[s]]
else:
# If a user has partially typed a latex symbol, give them
# a full list of options \al -> [\aleph, \alpha]
matches = [k for k in latex_symbols if k.startswith(s)]
return s, matches
return u'', []
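    # Illustrative sketch (not part of the original class): a complete symbol
    # maps straight to its unicode character, a partial one lists candidates
    # (here `completer` stands for any IPCompleter instance):
    #     completer.latex_matches('\\alpha')   # -> ('\\alpha', ['α'])
    #     completer.latex_matches('\\alp')     # -> ('\\alp', [every symbol starting with '\\alp'])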
def dispatch_custom_completer(self, text):
# io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
line = self.line_buffer
if not line.strip():
return None
# Create a little structure to pass all the relevant information about
# the current completion to any custom completer.
event = Bunch()
event.line = line
event.symbol = text
cmd = line.split(None, 1)[0]
event.command = cmd
event.text_until_cursor = self.text_until_cursor
# print "\ncustom:{%s]\n" % event # dbg
# for foo etc, try also to find completer for %foo
if not cmd.startswith(self.magic_escape):
try_magic = self.custom_completers.s_matches(
self.magic_escape + cmd)
else:
try_magic = []
for c in itertools.chain(self.custom_completers.s_matches(cmd),
try_magic,
self.custom_completers.flat_matches(self.text_until_cursor)):
# print "try",c # dbg
try:
res = c(event)
if res:
# first, try case sensitive match
withcase = [r for r in res if r.startswith(text)]
if withcase:
return withcase
# if none, then case insensitive ones are ok too
text_low = text.lower()
return [r for r in res if r.lower().startswith(text_low)]
except TryNext:
pass
return None
def complete(self, text=None, line_buffer=None, cursor_pos=None):
"""Find completions for the given text and line context.
Note that both the text and the line_buffer are optional, but at least
one of them must be given.
Parameters
----------
text : string, optional
Text to perform the completion on. If not given, the line buffer
is split using the instance's CompletionSplitter object.
line_buffer : string, optional
If not given, the completer attempts to obtain the current line
buffer via readline. This keyword allows clients which are
requesting for text completions in non-readline contexts to inform
the completer of the entire text.
cursor_pos : int, optional
Index of the cursor in the full line buffer. Should be provided by
remote frontends where kernel has no access to frontend state.
Returns
-------
text : str
Text that was actually used in the completion.
matches : list
A list of completion matches.
"""
# io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) #
# dbg
# if the cursor position isn't given, the only sane assumption we can
# make is that it's at the end of the line (the common case)
if cursor_pos is None:
cursor_pos = len(line_buffer) if text is None else len(text)
if PY3:
latex_text = text if not line_buffer else line_buffer[:cursor_pos]
latex_text, latex_matches = self.latex_matches(latex_text)
if latex_matches:
return latex_text, latex_matches
# if text is either None or an empty string, rely on the line buffer
if not text:
text = self.splitter.split_line(line_buffer, cursor_pos)
# If no line buffer is given, assume the input text is all there was
if line_buffer is None:
line_buffer = text
self.line_buffer = line_buffer
self.text_until_cursor = self.line_buffer[:cursor_pos]
# io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
# Start with a clean slate of completions
self.matches[:] = []
custom_res = self.dispatch_custom_completer(text)
if custom_res is not None:
# did custom completers produce something?
self.matches = custom_res
else:
# Extend the list of completions with the results of each
# matcher, so we return results to the user from all
# namespaces.
if self.merge_completions:
self.matches = []
for matcher in self.matchers:
try:
self.matches.extend(matcher(text))
except:
# Show the ugly traceback if the matcher causes an
# exception, but do NOT crash the kernel!
sys.excepthook(*sys.exc_info())
else:
for matcher in self.matchers:
self.matches = matcher(text)
if self.matches:
break
# FIXME: we should extend our api to return a dict with completions for
# different types of objects. The rlcomplete() method could then
# simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other environments.
# use penalize_magics_key to put magics after variables with same name
self.matches = sorted(set(self.matches), key=penalize_magics_key)
# io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
return text, self.matches
def rlcomplete(self, text, state):
"""Return the state-th possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
Parameters
----------
text : string
Text to perform the completion on.
state : int
Counter used by readline.
"""
if state == 0:
self.line_buffer = line_buffer = self.readline.get_line_buffer()
cursor_pos = self.readline.get_endidx()
# io.rprint("\nRLCOMPLETE: %r %r %r" %
# (text, line_buffer, cursor_pos) ) # dbg
# if there is only a tab on a line with only whitespace, instead of
# the mostly useless 'do you want to see all million completions'
# message, just do the right thing and give the user his tab!
# Incidentally, this enables pasting of tabbed text from an editor
# (as long as autoindent is off).
# It should be noted that at least pyreadline still shows file
# completions - is there a way around it?
# don't apply this on 'dumb' terminals, such as emacs buffers, so
# we don't interfere with their own tab-completion mechanism.
if not (self.dumb_terminal or line_buffer.strip()):
self.readline.insert_text('\t')
sys.stdout.flush()
return None
# Note: debugging exceptions that may occur in completion is very
# tricky, because readline unconditionally silences them. So if
# during development you suspect a bug in the completion code, turn
# this flag on temporarily by uncommenting the second form (don't
# flip the value in the first line, as the '# dbg' marker can be
# automatically detected and is used elsewhere).
        DEBUG = False
        #DEBUG = True # dbg
if DEBUG:
try:
self.complete(text, line_buffer, cursor_pos)
except:
import traceback
traceback.print_exc()
else:
# The normal production version is here
# This method computes the self.matches array
self.complete(text, line_buffer, cursor_pos)
try:
return self.matches[state]
except IndexError:
return None
|
mit
|
RachitKansal/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
324
|
1862
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
|
bsd-3-clause
|
cwu2011/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
44
|
22866
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, notice that glmnet divides the
    # objective by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that the two selected alphas do not fall more than
    # one grid point apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
    # Use lars_path and lasso_path (new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algorithm with the Gram trick.
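# (The Gram trick precomputes the Gram matrix np.dot(X.T, X) so the
# coordinate updates can reuse it; it typically only helps when
# n_samples > n_features, which is why the transposed fit below exercises
# the plain descent path.)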
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the coordinate descent algorithm without the Gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" raises a DeprecationWarning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
|
bsd-3-clause
|
Windy-Ground/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
63
|
26190
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the consistency of the sparse and
# dense code paths
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation used as an easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
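# (the inertia is the sum over all samples of the squared distance to the
# closest center)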
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that an error is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans works correctly even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio;
# as a result, all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio;
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_batch_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton.
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini-batch k-means is very unstable on such a small dataset, hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
|
bsd-3-clause
|
arnabgho/sklearn-theano
|
doc/conf.py
|
9
|
8057
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('sklearn-theano')
copyright = u('2014 - 2016, sklearn-theano developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1-git'
# The full version, including alpha/beta/rc tags.
import sklearn_theano
release = sklearn_theano.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sklearn-theano'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'sklearn-theano'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/mini_sloth_bw.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/mini_sloth_bw.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sklearn-theanodoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('sklearn-theano user guide'),
u('sklearn-theano developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/sklearn-theano-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
# to format example galleries:
app.add_javascript('js/examples.js')
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn_theano',
u'https://github.com/sklearn-theano/'
'sklearn-theano/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
kaushik94/pymc
|
pymc/examples/lasso_block_update.py
|
2
|
1663
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Sometimes, it is very useful to update a set of parameters together. For example, variables that are highly correlated are often good to update together. In PyMC 3, block updating is simple, as this example will demonstrate.
#
# Here we have a LASSO regression model where the two coefficients are strongly correlated. Normally, we would define the coefficient parameters as a single random variable, but here we define them separately to show how to do block updates.
#
# First we generate some fake data.
# <codecell>
from matplotlib.pylab import *
from pymc import *
import numpy as np
d = np.random.normal(size=(3, 30))
d1 = d[0] + 4
d2 = d[1] + 4
yd = .2 * d1 + .3 * d2 + d[2]
# <markdowncell>
# Then define the random variables.
# <codecell>
with Model() as model:
s = Exponential('s', 1)
m1 = Laplace('m1', 0, 100)
m2 = Laplace('m2', 0, 100)
p = d1 * m1 + d2 * m2
y = Normal('y', p, s ** -2, observed=yd)
# <markdowncell>
# For most samplers, including Metropolis and HamiltonianMC, simply pass a
# list of variables to sample as a block. This works with both scalar and
# array parameters.
# <codecell>
with model:
step1 = Metropolis([m1, m2])
step2 = Metropolis([s], proposal_dist=LaplaceProposal)
def run(n=5000):
if n == "short":
n = 300
with model:
start = find_MAP()
trace = sample(n, [step1, step2], start)
dh = fn(hessian_diag(model.logpt))
# <codecell>
traceplot(trace)
# <codecell>
hexbin(trace[m1], trace[m2], gridsize=50)
# <codecell>
if __name__ == '__main__':
run()
|
apache-2.0
|
sandias42/mlware
|
feature_extract/dumb_extract_extra.py
|
1
|
5247
|
""" This file implements completely naive featurization of the xml files."""
import numpy as np
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import make_pipeline
import os
from scipy import io
from optparse import OptionParser
import sys
train_paths = os.listdir("../data/train_extra/")
test_paths = os.listdir("../data/test/")
train_ids = []
train_class = []
test_ids = []
# parse command line arguments
op_parse = OptionParser()
op_parse.add_option("--vectorizer", action="store",
type=str, default="hashing",
help="Specify the vectorizer to extract features. Sklearn's HashingVectorizer is default")
op_parse.add_option("--str_processing", default="naive", action="store",
help="Specify the method to parse xml. Default is no processing")
op_parse.add_option("--extract_id", action="store_true",
help="Extract and save the id from test and training data")
op_parse.add_option("--extract_class", action="store_true",
help="Extract and save the class label of each class in the training set")
(op, args) = op_parse.parse_args()
if len(args) > 0:
op_parse.error("This script takes no arguments")
sys.exit(1)
op_parse.print_help()
def generate_xml_paths(train_paths, test_paths, xml_processor=lambda x: x, i=0):
"""
Processes the provided paths, extracting id and class information and
applying whatever function on the xml is desired.
xml_processor should take in an xml string and return a processed string.
"""
paths = train_paths + test_paths
print "The length of the test data is {0}, training data {1}".format(
len(test_paths), len(train_paths)
)
while i < len(paths):
abs_path = ''
# Split the file name into a list of [id, class_name, xml]
id_class_xml = paths[i].split('.')
assert id_class_xml[2] == 'xml'
# If the file is part of the test set, append the id to test_ids
if i >= len(train_paths):
if op.extract_id:
test_ids.append(id_class_xml[0])
assert id_class_xml[1] == 'X'
abs_path = os.path.join(
os.path.abspath("../data/test/"), paths[i])
# Otherwise file is in training set. Append id and class
else:
if op.extract_id:
train_ids.append(id_class_xml[0])
if op.extract_class:
train_class.append(id_class_xml[1])
abs_path = os.path.join(
os.path.abspath("../data/train_extra/"), paths[i])
# Open the file, process, and yield string
with open(abs_path, 'r') as xml_file:
xml_content = xml_processor(xml_file.read())
assert type(xml_content) == str
yield xml_content
print "sent file {0}, named \n {1} to processing".format(i, paths[i])
i += 1
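# A minimal sketch of a custom xml_processor (hypothetical helper; the
# feature pipelines below keep the default identity processor). Any callable
# mapping the raw xml string to another string can be plugged in, e.g.:
def strip_angle_brackets(xml_string):
    """Replace tag delimiters with spaces so only tag names and text remain."""
    return xml_string.replace('<', ' ').replace('>', ' ')
# usage sketch:
#   path_gen = generate_xml_paths(train_paths, test_paths,
#                                 xml_processor=strip_angle_brackets)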
# First try producing features with HashingVectorizer,
# which returns a scipy sparse matrix with shape
# (n_samples, 2 ** 20). It has some downsides and
# may not be usable in training.
if op.vectorizer == "hashing":
# first use simple word tokens (whitespace separated)
word_hasher = HashingVectorizer()
hashed_sparse_mat = word_hasher.transform(
generate_xml_paths(train_paths, test_paths)
)
print hashed_sparse_mat
print type(hashed_sparse_mat)
# Save the matrix as follows
io.mmwrite("../data/features/naive_word_hashed_full_features.mtx",
hashed_sparse_mat)
elif op.vectorizer == "hash_4gram_tfidf":
# pipe vectorizer with ngrams and tfidf
pipe = make_pipeline(
HashingVectorizer(ngram_range=(1, 4)),
TfidfTransformer()
)
hashed_sparse_mat = pipe.fit_transform(
generate_xml_paths(train_paths, test_paths)
)
print hashed_sparse_mat
print type(hashed_sparse_mat)
# Save the matrix as follows
io.mmwrite("../data/features/tfifd_4gram_hashed_full_features.mtx",
hashed_sparse_mat)
elif op.vectorizer == "counts10000":
word_vectorizer = CountVectorizer(max_features=10000, vocabulary=None)
path_gen = generate_xml_paths(train_paths, test_paths)
count_vec_corpus = word_vectorizer.fit_transform(path_gen).toarray()
np.save("../data/features/count_vector_full_10k_features_extra.npy",
np.array(count_vec_corpus)
)
elif op.vectorizer == "counts_tfidf10000":
pipe = make_pipeline(CountVectorizer(
max_features=10000), TfidfTransformer())
normalized_corpus = pipe.fit_transform(
generate_xml_paths(train_paths, test_paths)
).toarray()
np.save("../data/features/count_vector_full_10k_features_tfidf.npy",
np.array(normalized_corpus)
)
elif op.vectorizer == "none":
[0 for __ in generate_xml_paths(train_paths, test_paths)]
if op.extract_id:
np.save("../data/features/test_ids_extra.npy", np.array(test_ids))
np.save("../data/features/train_ids_extra.npy", np.array(train_ids))
if op.extract_class:
np.save("../data/features/train_classes_extra.npy", np.array(train_class))
|
mit
|
zhushun0008/sms-tools
|
software/models_interface/harmonicModel_function.py
|
2
|
2895
|
# function to call the main analysis/synthesis functions in software/models/harmonicModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
import harmonicModel as HM
def main(inputFile='../../sounds/vignesh.wav', window='blackman', M=1201, N=2048, t=-90,
minSineDur=0.1, nH=100, minf0=130, maxf0=300, f0et=7, harmDevSlope=0.01):
"""
Analysis and synthesis using the harmonic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics could have higher allowed deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# detect harmonics of input sound
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
# synthesize the harmonics
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
# write the sound resulting from harmonic analysis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the harmonic frequencies
plt.subplot(3,1,2)
if (hfreq.shape[1] > 0):
numFrames = hfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
hfreq[hfreq<=0] = np.nan
plt.plot(frmTime, hfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of harmonic tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
if __name__ == "__main__":
main()
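# A usage sketch (parameter values here are purely illustrative): the same
# analysis/synthesis can be re-run with a different window and f0 search
# range, e.g. main(window='hamming', M=1601, N=2048, minf0=100, maxf0=400)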
|
agpl-3.0
|
bhwester/computer-science-projects
|
data_analysis_and_visualization_system/classifiers.py
|
1
|
10735
|
# Template by Bruce Maxwell
# Spring 2015
# CS 251 Project 8
#
# Classifier class and child definitions
import sys
import data
import analysis as an
import numpy as np
import scipy.cluster.vq as vq
import sklearn.metrics as cm
class Classifier:
def __init__(self, type):
'''The parent Classifier class stores only a single field: the type of
the classifier. A string makes the most sense.
'''
self._type = type
def type(self, newtype=None):
'''Set or get the type with this function'''
if newtype is not None:
self._type = newtype
return self._type
def confusion_matrix(self, truecats, classcats):
'''Takes in two Nx1 matrices of zero-index numeric categories and
computes the confusion matrix. The rows represent true
categories, and the columns represent the classifier output.
'''
counts = cm.confusion_matrix(truecats, classcats)
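# counts[i, j] is the number of samples whose true category is i and that
# the classifier assigned to category j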
# numCats = len(np.unique(truecats))
# cmtx = np.empty((3, 3))
# counts = np.empty((numCats+1, numCats+1))
# for i in range(numCats):
# for j in range(numCats):
# counts[i, j] =
# accuracy = len([x for x in truecats == classcats])
# cmtx[0, 0] =
# cmtx[0, 1] =
# cmtx[1, 0] =
# cmtx[1, 1] =
return counts
def confusion_matrix_str(self, cmtx):
'''Takes in a confusion matrix and returns a string suitable for printing.'''
s = '\n\nConfusion matrix:\n\n'
for i in range(cmtx.shape[0]):
s += str(cmtx[i, :]) + '\n'
return s
def __str__(self):
'''Converts a classifier object to a string. Prints out the type.'''
return str(self._type)
class NaiveBayes(Classifier):
'''NaiveBayes implements a simple NaiveBayes classifier using a
Gaussian distribution as the pdf.
'''
def __init__(self, dataObj=None, headers=[], categories=None):
'''Takes in a Data object with N points, a set of F headers, and a
matrix of categories, one category label for each data point.'''
# call the parent init with the type
Classifier.__init__(self, 'Naive Bayes Classifier')
# store the headers used for classification
self.headers = headers
# number of classes and number of features
# original class labels
# unique data for the Naive Bayes: means, variances, scales
# if given data,
if dataObj is not None:
# call the build function
self.build(dataObj.get_data(headers), categories)
def build(self, A, categories):
'''Builds the classifier given the data points in A and the categories'''
# figure out how many categories there are and get the mapping (np.unique)
unique, mapping = np.unique(np.array(categories.T), return_inverse=True)
self.num_classes = len(unique)
self.num_features = A.shape[1]
self.class_labels = np.matrix(mapping).T
# create the matrices for the means, vars, and scales
# the output matrices will be categories (C) x features (F)
self.class_means = np.zeros((self.num_classes, self.num_features))
self.class_vars = np.zeros((self.num_classes, self.num_features))
self.class_scales = np.zeros((self.num_classes, self.num_features))
# compute the means/vars/scales for each class
for i in range(self.num_classes):
data = A[(mapping == i), :]
self.class_means[i, :] = np.mean(data, axis=0)
self.class_vars[i, :] = np.var(data, axis=0)
self.class_scales[i, :] = 1/np.sqrt(2*np.pi*np.var(data, axis=0))
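# the scale is the Gaussian normalization constant 1 / sqrt(2 * pi * var)
# for each (class, feature) pair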
# store any other necessary information: # of classes, # of features, original labels
return
def classify(self, A, return_likelihoods=False):
'''Classify each row of A into one category. Return a matrix of
category IDs in the range [0..C-1], and an array of class
labels using the original label values. If return_likelihoods
is True, it also returns the NxC likelihood matrix.
'''
# error check to see if A has the same number of columns as
# the class means
assert A.shape[1] == self.class_means.shape[1]
# make a matrix that is N x C to store the probability of each
# class for each data point
P = np.matrix(np.zeros((A.shape[0], self.num_classes))) # a matrix of zeros that is N (rows of A) x C (number of classes)
# calculate the probabilities by looping over the classes
# with numpy-fu you can do this in one line inside a for loop
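# for class i, each row of P gets the product over features of the Gaussian
# density scale[i, f] * exp(-(x[f] - mean[i, f]) ** 2 / (2 * var[i, f]))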
for i in np.arange(P.shape[1]):
P[:, i] = np.multiply(self.class_scales[i, :], np.exp(-np.square(A-self.class_means[i, :])/(2*self.class_vars[i, :]))).prod(axis=1)
# calculate the most likely class for each data point
cats = np.argmax(P, axis=1) # take the argmax of P along axis 1
print("Cats:")
print(cats)
# use the class ID as a lookup to generate the original labels
# FIXME: labels: get lookup to work
print("Class labels:")
print(self.class_labels)
# labels = self.class_labels[cats] isn't working
labels = self.class_labels[cats[:, 0], 0]
print("Labels:")
print(labels)
if return_likelihoods:
return cats, labels, P
return cats, labels
def __str__(self):
'''Make a pretty string that prints out the classifier information.'''
s = "\nNaive Bayes Classifier\n"
for i in range(self.num_classes):
s += 'Class %d --------------------\n' % (i)
s += 'Mean : ' + str(self.class_means[i,:]) + "\n"
s += 'Var : ' + str(self.class_vars[i,:]) + "\n"
s += 'Scales: ' + str(self.class_scales[i,:]) + "\n"
s += "\n"
return s
def write(self, filename):
'''Writes the Bayes classifier to a file.'''
# extension
return
def read(self, filename):
'''Reads in the Bayes classifier from the file'''
# extension
return
class KNN(Classifier):
def __init__(self, dataObj=None, headers=[], categories=None, K=None):
'''Take in a Data object with N points, a set of F headers, and a
matrix of categories, with one category label for each data point.'''
# call the parent init with the type
Classifier.__init__(self, 'KNN Classifier')
# store the headers used for classification
self.headers = headers
# number of classes and number of features
# original class labels
# unique data for the KNN classifier: list of exemplars (matrices)
# if given data,
if dataObj is not None:
# call the build function
self.build(dataObj.getData(headers), categories)
def build(self, A, categories, K=None):
'''Builds the classifier given the data points in A and the categories'''
# figure out how many categories there are and get the mapping (np.unique)
unique, mapping = np.unique(np.array(categories.T), return_inverse=True)
self.num_classes = len(unique)
self.num_features = A.shape[1]
self.class_labels = np.matrix(mapping).T
# for each category i, build the set of exemplars
self.exemplars = []
for i in range(self.num_classes):
data = A[(mapping == i), :]
if K is None:
self.exemplars.append(data)
else:
# run K-means on the rows of A where the category/mapping is i
codebook, bookerror = vq.kmeans2(data, K)
print(codebook) # FIXME: numpy.linalg.linalg.LinAlgError: Matrix is not positive definite
self.exemplars.append(codebook)
# store any other necessary information: # of classes, # of features, original labels
return
def classify(self, A, K=3, return_distances=False):
'''Classify each row of A into one category. Return a matrix of
category IDs in the range [0..C-1], and an array of class
labels using the original label values. If return_distances is
True, it also returns the NxC distance matrix.
The parameter K specifies how many neighbors to use in the
distance computation. The default is three.'''
# error check to see if A has the same number of columns as the class means
assert A.shape[1] == self.num_features
# make a matrix that is N x C to store the distance to each class for each data point
N = A.shape[0]
D = np.zeros((N, self.num_classes)) # a matrix of zeros that is N (rows of A) x C (number of classes)
for i in range(self.num_classes):
# make a temporary matrix that is N x M where M is the number of exemplars (rows in exemplars[i])
M = self.exemplars[i].shape[0]
temp = np.zeros((N, M))
# calculate the distance from each point in A to each point in exemplar matrix i (for loop)
for j in range(N):
for k in range(M):
temp[j, k] = np.sqrt(np.sum(np.square(A[j, :] - self.exemplars[i][k, :])))
temp = np.sort(temp, axis=1)  # np.sort returns a sorted copy, so keep the result
D[:, i] = np.sum(temp[:, 0:K], axis=1)
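# D[:, i] now holds, for each data point, the sum of the distances to its K
# nearest exemplars of class i, so a smaller value means a better match.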
# calculate the most likely class for each data point
cats = np.matrix(np.argmin(D, axis=1)).T # take the argmin of D along axis 1
print("Cats:")
print(cats)
# use the class ID as a lookup to generate the original labels
# FIXME: labels: get lookup to work
print("Class labels:")
print(self.class_labels)
# labels = self.class_labels[cats] isn't working
labels = self.class_labels[cats[:, 0], 0]
print("Labels:")
print(labels)
if return_distances:
return cats, labels, D
return cats, labels
def __str__(self):
'''Make a pretty string that prints out the classifier information.'''
s = "\nKNN Classifier\n"
for i in range(self.num_classes):
s += 'Class %d --------------------\n' % (i)
s += 'Number of Exemplars: %d\n' % (self.exemplars[i].shape[0])
s += 'Mean of Exemplars :' + str(np.mean(self.exemplars[i], axis=0)) + "\n"
s += "\n"
return s
def write(self, filename):
'''Writes the KNN classifier to a file.'''
# extension
return
def read(self, filename):
'''Reads in the KNN classifier from the file'''
# extension
return
|
mit
|
loli/sklearn-ensembletrees
|
examples/svm/plot_rbf_parameters.py
|
26
|
4273
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters `gamma`
and `C` of the rbf kernel SVM.
Intuitively, the `gamma` parameter defines how far the influence
of a single training example reaches, with low values meaning 'far'
and high values meaning 'close'.
The `C` parameter trades off misclassification of training examples
against simplicity of the decision surface. A low C makes
the decision surface smooth, while a high C aims at classifying
all training examples correctly.
Two plots are generated. The first is a visualization of the
decision function for a variety of parameter values, and the second
is a heatmap of the classifier's cross-validation accuracy as
a function of `C` and `gamma`. For this example we explore a relatively
large grid for illustration purposes. In practice, a logarithmic
grid from `10**-3` to `10**3` is usually sufficient.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
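# Note: cross_validation and grid_search were later consolidated into
# sklearn.model_selection (with slightly different constructor arguments);
# this example targets the older API.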
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
Y = iris.target
# dataset for decision function visualization
X_2d = X[:, :2]
X_2d = X_2d[Y > 0]
Y_2d = Y[Y > 0]
Y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifier
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = 10.0 ** np.arange(-2, 9)
gamma_range = 10.0 ** np.arange(-5, 4)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedKFold(y=Y, n_folds=3)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, Y)
print("The best classifier is: ", grid.best_estimator_)
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1, 1e2, 1e4]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, Y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma 10^%d, C 10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.jet)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=Y_2d, cmap=plt.cm.jet)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
score_dict = grid.grid_scores_
# We extract just the scores
scores = [x[1] for x in score_dict]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# draw heatmap of accuracy as a function of gamma and C
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.spectral)
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.show()
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/lib/matplotlib/widgets.py
|
4
|
52893
|
"""
GUI Neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine an :class:`matplotlib.axes.Axes`
instance and pass that as the first arg. matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from __future__ import print_function
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use ``matplotlib.widgets.lock(someobj)`` to prevent
other widgets from drawing while you're drawing.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
"""reserve the lock for *o*"""
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
"""release the lock"""
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
"""drawing is available to *o*"""
return not self.locked() or self.isowner(o)
def isowner(self, o):
"""Return True if *o* owns this lock"""
return self._owner is o
def locked(self):
"""Return True if the lock is currently held by an owner"""
return self._owner is not None
class Widget(object):
"""
Abstract base class for GUI neutral widgets
"""
drawon = True
eventson = True
class AxesWidget(Widget):
"""Widget that is connected to a single :class:`~matplotlib.axes.Axes`.
Attributes:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget
*canvas* : :class:`~matplotlib.backend_bases.FigureCanvasBase` subclass
The parent figure canvas for the widget.
*active* : bool
If False, the widget does not respond to events.
"""
def __init__(self, ax):
self.ax = ax
self.canvas = ax.figure.canvas
self.cids = []
self.active = True
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores callback ids for later cleanup.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
class Button(AxesWidget):
"""
A GUI neutral button
The following attributes are accessible
*ax*
The :class:`matplotlib.axes.Axes` the button renders into.
*label*
A :class:`matplotlib.text.Text` instance.
*color*
The color of the button when not hovering.
*hovercolor*
The color of the button when hovering.
Call :meth:`on_clicked` to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
*ax*
The :class:`matplotlib.axes.Axes` instance the button
will be placed into.
*label*
The button text. Accepts string.
*image*
The image to place in the button, if not *None*.
Can be any legal arg to imshow (numpy array,
matplotlib Image instance, or PIL image).
*color*
The color of the button when not activated
*hovercolor*
The color of the button when the mouse is over it
"""
AxesWidget.__init__(self, ax)
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if self.ignore(event):
return
if event.inaxes != self.ax:
return
if not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
def _release(self, event):
if self.ignore(event):
return
if event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
if not self.eventson:
return
if event.inaxes != self.ax:
return
for cid, func in self.observers.iteritems():
func(event)
def _motion(self, event):
if self.ignore(event):
return
if event.inaxes == self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon:
self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this *func* with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
class Slider(AxesWidget):
"""
A slider representing a floating point range
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*
*valinit*
The slider initial position
*label*
The slider label
*valfmt*
Used to format the slider value
*closedmin* and *closedmax*
Indicate whether the slider interval is closed
*slidermin* and *slidermax*
Used to constrain the value of this slider to the values
of other sliders.
additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` which draws the slider
knob. See the :class:`matplotlib.patches.Rectangle` documentation for
valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...)
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axvline(valinit, 0, 1, color='r', lw=1)
self.valfmt = valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt % valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
"""update the slider position"""
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.xdata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
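# The polygon created by axvspan appears to have vertices
# (valmin, 0), (valmin, 1), (val, 1), (val, 0); indices 2 and 3 are the
# moving right-hand edge of the knob, so updating them resizes the slider.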
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
for cid, func in self.observers.iteritems():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
def reset(self):
"""reset the slider to the initial value if needed"""
if (self.val != self.valinit):
self.set_val(self.valinit)
class CheckButtons(AxesWidget):
"""
A GUI neutral set of check buttons
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are
located in
*labels*
List of :class:`matplotlib.text.Text` instances
*lines*
List of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have ``set_visible(False)``
when its box is not checked.
*rectangles*
List of :class:`matplotlib.patches.Rectangle` instances
Connect to the CheckButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*actives*
A len(buttons) list of booleans indicating whether
the button is active
"""
AxesWidget.__init__(self, ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels) > 1:
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color': 'k', 'linewidth': 1.25,
'transform': ax.transAxes, 'solid_capstyle': 'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy / 2., dy / 2.
x, y = 0.05, y - h / 2.
p = Rectangle(xy=(x, y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x + w], [y + h, y], **lineparams)
l2 = Line2D([x, x + w], [y, y + h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1, l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
for p, t, lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y)):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in self.observers.iteritems():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
class RadioButtons(AxesWidget):
"""
A GUI neutral radio button
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are in
*activecolor*
The color of the button when clicked
*labels*
A list of :class:`matplotlib.text.Text` instances
*circles*
A list of :class:`matplotlib.patches.Circle` instances
Connect to the RadioButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*active*
The index into labels for the button that is active
*activecolor*
The color of the button when clicked
"""
AxesWidget.__init__(self, ax)
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt == active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p, t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else:
return
for p in self.circles:
if p == inp:
color = self.activecolor
else:
color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in self.observers.iteritems():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`
"""
def __init__(self, targetfig, toolfig):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
# FIXME: The docstring above ends abruptly without finishing the sentence.
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s' % (self.slider.label.get_text(),
self.slider.valfmt)
return fmt % x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left',
0, 1, targetfig.subplotpars.left,
closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom,
'bottom', 0, 1,
targetfig.subplotpars.bottom,
closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1,
targetfig.subplotpars.right,
closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1,
targetfig.subplotpars.top,
closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace',
0, 1, targetfig.subplotpars.wspace,
closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace',
0, 1, targetfig.subplotpars.hspace,
closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace,)
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon:
self.targetfig.canvas.draw()
class Cursor(AxesWidget):
"""
A horizontal and vertical line that span the axes and move with
the pointer. You can turn off the hline or vline respectively with
the attributes
*horizOn*
Controls the visibility of the horizontal line
*vertOn*
Controls the visibility of the vertical line
and the visibility of the cursor itself with the *visible* attribute
"""
def __init__(self, ax, horizOn=True, vertOn=True, useblit=False,
**lineprops):
"""
Add a cursor to *ax*. If ``useblit=True``, use the backend-
dependent blitting features for faster updates (GTKAgg
only for now). *lineprops* is a dictionary of line properties.
.. plot :: mpl_examples/widgets/cursor.py
"""
# TODO: Is the GTKAgg limitation still true?
AxesWidget.__init__(self, ax)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.clear)
self.visible = True
self.horizOn = horizOn
self.vertOn = vertOn
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
lineprops['animated'] = True
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
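# Blitting pattern (when useblit is True): clear() caches the axes
# background on every full draw, and _update() restores that cached region
# and redraws only the two cursor lines instead of forcing a full redraw.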
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
"""on mouse motion draw the cursor if visible"""
if self.ignore(event):
return
if not self.canvas.widgetlock.available(self):
return
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible:
return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
show()
"""
def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
xmin, xmax = axes[-1].get_xlim()
ymin, ymax = axes[-1].get_ylim()
xmid = 0.5 * (xmin + xmax)
ymid = 0.5 * (ymin + ymax)
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if useblit:
lineprops['animated'] = True
if vertOn:
self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
else:
self.vlines = []
if horizOn:
self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
for ax in axes]
else:
self.hlines = []
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
"""clear the cursor"""
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
if self.horizOn:
for line in self.hlines:
line.set_ydata((event.ydata, event.ydata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector(AxesWidget):
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
*onmove_callback* is an optional callback that is called on mouse
move within the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
rectprops=None, onmove_callback=None):
"""
Create a span selector in *ax*. When a selection is made, clear
the span and call *onselect* with::
onselect(vmin, vmax)
and clear the span.
*direction* must be 'horizontal' or 'vertical'
If *minspan* is not *None*, ignore events smaller than *minspan*
The span rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', alpha=0.5)
Set the visible attribute to *False* if you want to turn off
the functionality of the span selector
"""
AxesWidget.__init__(self, ax)
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.visible = True
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
# Set useblit based on original canvas.
self.useblit = useblit and self.canvas.supports_blit
# Reset canvas so that `new_axes` connects events.
self.canvas = None
self.new_axes(ax)
def new_axes(self, ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
self.disconnect_events()
self.canvas = ax.figure.canvas
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData,
self.ax.transAxes)
w, h = 0, 1
else:
trans = blended_transform_factory(self.ax.transAxes,
self.ax.transData)
w, h = 1, 0
self.rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
if not self.useblit:
self.ax.add_patch(self.rect)
def update_background(self, event):
"""force an update of the background"""
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
"""return *True* if *event* should be ignored"""
widget_off = not self.visible or not self.active
non_event = event.inaxes != self.ax or event.button != 1
return widget_off or non_event
def press(self, event):
"""on button press event"""
if self.ignore(event):
return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
"""on button release event"""
if self.ignore(event) and not self.buttonDown:
return
if self.pressv is None:
return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
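# Note: the `or` fallback above also kicks in when xdata/ydata is exactly 0
# (a falsy value), not only when the release happens outside the axes (None).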
if vmin > vmax:
vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span < self.minspan:
return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
"""
Draw using newfangled blit or oldfangled draw depending
on *useblit*
"""
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
"""on motion notify event"""
if self.pressv is None or self.ignore(event):
return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv > maxv:
minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv - minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv - minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
class RectangleSelector(AxesWidget):
"""
Select a rectangular region (a min/max range in both x and y) of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data',
button=None):
"""
Create a selector in *ax*. When a selection is made, clear
the span and call onselect with::
onselect(pos_1, pos_2)
and clear the drawn box/line. The ``pos_1`` and ``pos_2`` are
arrays of length 2 containing the x- and y-coordinate.
If *minspanx* is not *None* then events smaller than *minspanx*
in x direction are ignored (it's the same for y).
The rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with *lineprops*; default::
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use *drawtype* if you want the mouse to draw a line,
a box or nothing between click and actual position by setting
``drawtype = 'line'``, ``drawtype='box'`` or ``drawtype = 'none'``.
*spancoords* is one of 'data' or 'pixels'. If 'data', *minspanx*
and *minspany* will be interpreted in the same coordinates as
the x and y axis. If 'pixels', they are in pixels.
*button* is a list of integers indicating which mouse buttons should
be used for rectangle selection. You can also specify a single
integer if only a single button is desired. Default is *None*,
which does not limit which button can be used.
Note, typically:
1 = left mouse button
2 = center mouse button (scroll wheel)
3 = right mouse button
"""
AxesWidget.__init__(self, ax)
self.visible = True
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor='black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0, 0),
0, 1, visible=False, **self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth=2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0, 0], [0, 0], visible=False,
**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit and self.canvas.supports_blit
self.minspanx = minspanx
self.minspany = minspany
if button is None or isinstance(button, list):
self.validButtons = button
elif isinstance(button, int):
self.validButtons = [button]
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
"""force an update of the background"""
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
"""return *True* if *event* should be ignored"""
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# Only do rectangle selection if event was triggered
# with a desired button
if self.validButtons is not None:
if event.button not in self.validButtons:
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes != self.ax
# If a button was pressed, check if the release-button is the
# same. If event is out of axis, limit the data coordinates to axes
# boundaries.
if event.button == self.eventpress.button and event.inaxes != self.ax:
(xdata, ydata) = self.ax.transData.inverted().transform_point(
(event.x, event.y))
x0, x1 = self.ax.get_xbound()
y0, y1 = self.ax.get_ybound()
xdata = max(x0, xdata)
xdata = min(x1, xdata)
ydata = max(y0, ydata)
ydata = min(y1, ydata)
event.xdata = xdata
event.ydata = ydata
return False
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes != self.ax or
event.button != self.eventpress.button)
def press(self, event):
"""on button press event"""
if self.ignore(event):
return
# make the drawn box/line visible and get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
"""on button release event"""
if self.eventpress is None or self.ignore(event):
return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords == 'data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line and get values in the right
# order
elif self.spancoords == 'pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx < self.minspanx
yproblems = self.minspany is not None and spany < self.minspany
if (((self.drawtype == 'box') or (self.drawtype == 'line')) and
(xproblems or yproblems)):
# check that the drawn distance (if it exists) is not too small in
# either the x- or y-direction
return
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # initial values
return False
def update(self):
"""draw using newfangled blit or oldfangled draw depending on
useblit
"""
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
"""on motion notify event if box/line is wanted"""
if self.eventpress is None or self.ignore(event):
return
x, y = event.xdata, event.ydata # actual position (with button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx > maxx:
minx, maxx = maxx, minx # get them in the right order
if miny > maxy:
miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx - minx) # set width and height of box
self.to_draw.set_height(maxy - miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
"""
Use this to activate / deactivate the RectangleSelector
from your program with an boolean parameter *active*.
"""
self.active = active
def get_active(self):
""" Get status of active mode (boolean variable)"""
return self.active
class LassoSelector(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select
data points from an image.
In contrast to :class:`Lasso`, `LassoSelector` is written with an interface
similar to :class:`RectangleSelector` and :class:`SpanSelector` and will
continue to interact with the axes until disconnected.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*onselect* : function
Whenever the lasso is released, the `onselect` function is called and
passed the vertices of the selected path.
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(verts):
print verts
lasso = LassoSelector(ax, onselect)
"""
def __init__(self, ax, onselect=None, useblit=True, lineprops=None):
AxesWidget.__init__(self, ax)
self.useblit = useblit and self.canvas.supports_blit
self.onselect = onselect
self.verts = None
if lineprops is None:
lineprops = dict()
self.line = Line2D([], [], **lineprops)
self.line.set_visible(False)
self.ax.add_line(self.line)
self.connect_event('button_press_event', self.onpress)
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.update_background)
def ignore(self, event):
wrong_button = hasattr(event, 'button') and event.button != 1
return not self.active or wrong_button
def onpress(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
self.verts = [(event.xdata, event.ydata)]
self.line.set_visible(True)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
if event.inaxes == self.ax:
self.verts.append((event.xdata, event.ydata))
self.onselect(self.verts)
self.line.set_data([[], []])
self.line.set_visible(False)
self.verts = None
def onmove(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def update_background(self, event):
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
class Lasso(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select data points
from an image.
Unlike :class:`LassoSelector`, this must be initialized with a starting
point `xy`, and the `Lasso` events are destroyed upon release.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*xy* : array
Coordinates of the start of the lasso.
*callback* : function
Whenever the lasso is released, the `callback` function is called and
passed the vertices of the selected path.
"""
def __init__(self, ax, xy, callback=None, useblit=True):
AxesWidget.__init__(self, ax)
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
x, y = xy
self.verts = [(x, y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.ax.add_line(self.line)
self.callback = callback
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts) > 2:
self.callback(self.verts)
self.ax.lines.remove(self.line)
self.verts = None
self.disconnect_events()
def onmove(self, event):
if self.ignore(event):
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
|
mit
|
lucabaldini/ximpol
|
ximpol/config/abell401.py
|
1
|
2671
|
#!/usr/bin/env python
#
# Copyright (C) 2015--2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
import os
from ximpol.srcmodel.roi import xPointSource, xROIModel
from ximpol.srcmodel.spectrum import power_law
from ximpol.srcmodel.polarization import xPolarizationMap, constant
from ximpol.core.spline import xInterpolatedUnivariateSplineLinear
from ximpol.utils.logging_ import logger
from ximpol import XIMPOL_CONFIG
"""Configuration file for a model of Abell 401.
"""
def parse_spectral_model(file_name, emin=0.5, emax=15.):
"""Parse the input file with the spectral point.
"""
file_path = os.path.join(XIMPOL_CONFIG, 'ascii', file_name)
logger.info('Parsing input file %s...' % file_path)
_energy, _binw, _flux, _fluxerr, _mod, _a, _b = numpy.loadtxt(file_path,
unpack=True)
_mask = (_energy >= emin)*(_energy <= emax)
_energy = _energy[_mask]
_mod = _mod[_mask]
_mod /= _energy**2.
fmt = dict(xname='Energy', xunits='keV', yname='Flux',
yunits='cm$^{-2}$ s$^{-1}$ keV$^{-1}$')
return xInterpolatedUnivariateSplineLinear(_energy, _mod, **fmt)
ROI_MODEL = xROIModel(44.7371, 13.5822)
# Read in the spectral models.
spectral_model_spline = parse_spectral_model('Abell_401.txt')
def energy_spectrum(E, t):
return spectral_model_spline(E)
polarization_degree = constant(0.)
polarization_angle = constant(0.)
abell401 = xPointSource('Abell 401', ROI_MODEL.ra, ROI_MODEL.dec,
energy_spectrum, polarization_degree, polarization_angle)
ROI_MODEL.add_source(abell401)
def display():
"""Display the source model.
"""
from ximpol.utils.matplotlib_ import pyplot as plt
from ximpol.srcmodel.img import xFITSImage
print(ROI_MODEL)
fig = plt.figure('Energy spectrum')
spectral_model_spline.plot(logy=True, show=False, label='Total')
plt.show()
if __name__ == '__main__':
display()
|
gpl-3.0
|
KrasnitzLab/SCGV
|
scgv/views/heatmap.py
|
1
|
1354
|
'''
Created on Dec 14, 2016
@author: lubo
'''
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from utils.color_map import ColorMap
from scgv.views.base import ViewerBase
from scgv.utils.color_map import ColorMap
class HeatmapViewer(ViewerBase):
def __init__(self, model):
super(HeatmapViewer, self).__init__(model)
self.cmap = ColorMap.make_diverging05()
def draw_heatmap(self, ax):
if self.model.heatmap is not None:
ax.imshow(self.model.heatmap,
aspect='auto',
interpolation='nearest',
# cmap=plt.get_cmap('seismic'), # self.cmap.colors,
cmap=self.cmap.colors,
vmin=self.NORMALIZE_MIN,
vmax=self.NORMALIZE_MAX,
# norm=self.cmap.norm,
extent=self.model.heat_extent)
chrom_lines = self.model.calc_chrom_lines_index()
for chrom_line in chrom_lines:
ax.axhline(y=chrom_line, color="#888888", linewidth=0.5)
chrom_labelspos = self.calc_chrom_labels_pos(chrom_lines)
ax.set_yticks(chrom_labelspos)
if len(chrom_labelspos) <= len(self.CHROM_LABELS):
ax.set_yticklabels(
self.CHROM_LABELS[:len(chrom_labelspos)], fontsize=9)
|
mit
|
mugizico/scikit-learn
|
examples/linear_model/plot_logistic.py
|
312
|
1426
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
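# The curve plotted below is the fitted model's estimate of P(y=1 | x): the
# linear score clf.coef_ * x + clf.intercept_ passed through the logistic
# sigmoid defined above.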
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
|
bsd-3-clause
|
h-mayorquin/camp_india_2016
|
project1/plot_blocks.py
|
1
|
1348
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
sns.set(font_scale=2.0)
sns.set_style('whitegrid')
data_before = np.load('block_before.npy')
data_after = np.load('block_after.npy')
x = data_before[0]
y = data_after[0]
gs = gridspec.GridSpec(3, 3)
fig = plt.figure(figsize=(16, 12))
fig.suptitle('Ventral vs Dorsal prediction before and after training')
for x in range(3):
score_before = data_before[x]
score_after = data_after[x]
ax0 = fig.add_subplot(gs[x, 0])
ax0.bar([1], [score_before], align='center', label='before')
ax0.bar([2], [score_after], color='r', align='center', label='after')
ax0.legend()
ax0.set_ylim([0, 1])
ax0.set_xlim([-0.1, 3.1])
ax0.xaxis.set_visible(False)
for y in range(1,3):
score_before = data_before[x + 3 * y]
score_after = data_after[x + 3 * y]
ax = fig.add_subplot(gs[x, y], sharey=ax0)
ax.bar([1], [score_before], align='center', label='before')
ax.bar([2], [score_after], color='r', align='center', label='after')
ax.legend()
ax.set_ylim([0, 1])
ax.set_xlim([-0.1, 3.1])
# Remove axis
ax.xaxis.set_visible(False)
# if(y > 0):
# ax.yaxis.set_visible(False)
plt.show()
|
mit
|
alanmitchell/fnsb-benchmark
|
benchmark.py
|
1
|
65642
|
""" -------------------- MAIN BENCHMARKING SCRIPT -----------------------
Run this script by executing the following from a command prompt:
python3 benchmark.py
Or, use just "python benchmark.py" if that is how you normally access Python 3.
On some operating systems (Linux, Mac OS X), you may be able to run the script
by executing "./benchmark.py" if you have changed the file mode of the file
to be executable.
This script uses settings from a "settings.py" file, which should be placed
in the same directory as this script. Start by copying "settings_example.py"
to "settings.py" and then modify settings in that copied file.
In the settings file, you can specify the path to the Utility Bill CSV file you
want to read and the spreadsheet Other Data file, which contains the list of
sites to process, information (e.g. square feet) about each site, and degree day
data. Modify this spreadsheet according to your needs; create multiple
versions if you sometimes only want to process some of the sites. The "data"
directory is the best place to put Utility Bill and Other Data files.
All reports and other output from this script appear in the "output" directory.
View the resulting benchmarking report by opening the "output/index.html" file.
Other useful data is put in the "output/extra_data" directory, including a
spreadsheet that summarizes utility information for all of the buildings.
Each time the script is run, all files in the output directory are deleted and
replaced with new files. So, if you have modified any of these files and want
to save your modifications, copy the files to a location outside the output
directory.
The main script code is found at the *bottom* of this file; prior to the script
are the functions that do the main work. This code handles the main control
flow of the script. This script also relies on a few companion
modules: bench_util.py, graph_util.py, and template_util.py.
These are present in this directory.
"""
import time
import pickle
import glob
import os
import pprint
import datetime
import warnings
import pandas as pd
import numpy as np
import bench_util as bu
import graph_util as gu
import template_util
import shutil
import settings # the file holding settings for this script
# Filter out Matplotlib warnings, as we sometimes get warnings
# related to blank graphs.
warnings.filterwarnings("ignore", module="matplotlib")
#*****************************************************************************
#*****************************************************************************
# ----------------------Function for Preprocessing Data ----------------------
def preprocess_data():
"""Loads and processes the Utility Bill data into a smaller and more usable
form. Returns
- a DataFrame with the raw billing data,
- a DataFrame with the preprocessed data,
- and a bench_util.Util object, which provides useful functions to
the analysis portion of this script.
The notebook "preprocess_data.ipynb" was used to develop this code and shows
intermediate results from each of the steps.
"""
# --- Read the CSV file and convert the billing period dates into
# real Pandas dates
fn = settings.UTILITY_BILL_FILE_PATH
msg('Starting to read Utility Bill Data File.')
dfu = pd.read_csv(fn,
parse_dates=['From', 'Thru'],
dtype={'Site ID': 'object', 'Account Number': 'object'}
)
msg('Removing Unneeded columns and Combining Charges.')
# Filter down to the needed columns and rename them
cols = [
('Site ID', 'site_id'),
('From', 'from_dt'),
('Thru', 'thru_dt'),
('Service Name', 'service_type'),
('Item Description', 'item_desc'),
('Usage', 'usage'),
('Cost', 'cost'),
('Units', 'units'),
]
old_cols, new_cols = zip(*cols) # unpack into old and new column names
dfu1 = dfu[list(old_cols)].copy() # select just those columns from the original DataFrame
dfu1.columns = new_cols # rename the columns
# --- Collapse Non-Usage Charges into "Other Charge"
# This cuts the processing time in half due to not having to split a whole
# bunch of non-consumption charges.
dfu1.loc[np.isnan(dfu1.usage), 'item_desc'] = 'Other Charge'
# Pandas can't do a GroupBy on NaNs, so replace with something
dfu1.units.fillna('-', inplace=True)
dfu1 = dfu1.groupby(['site_id',
'from_dt',
'thru_dt',
'service_type',
'item_desc',
'units']).sum()
dfu1.reset_index(inplace=True)
# --- Split Each Bill into Multiple Pieces, each within one Calendar Month
msg('Split Bills into Calendar Month Pieces.')
# Split all the rows into calendar month pieces and make a new DataFrame
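# Illustrative (assumed) behavior of bu.split_period(): a bill running
# 2017-12-20 through 2018-01-19 would yield two pieces, roughly
# (cal_year=2017, cal_mo=12, bill_frac~0.4) and (cal_year=2018, cal_mo=1,
# bill_frac~0.6), with the bill_frac values summing to 1.0 so that usage and
# cost are prorated across the calendar months the bill spans.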
recs=[]
for ix, row in dfu1.iterrows():
# it is *much* faster to modify a dictionary than a Pandas series
row_tmpl = row.to_dict()
# Pull out the start and end of the billing period; the from & thru dates can be
# dropped now because the bill is about to be split up across months.
st = row_tmpl['from_dt']
en = row_tmpl['thru_dt']
del row_tmpl['from_dt']
del row_tmpl['thru_dt']
for piece in bu.split_period(st, en):
new_row = row_tmpl.copy()
new_row['cal_year'] = piece.cal_year
new_row['cal_mo'] = piece.cal_mo
# new_row['days_served'] = piece.days_served # not really needed
new_row['usage'] *= piece.bill_frac
new_row['cost'] *= piece.bill_frac
recs.append(new_row)
dfu2 = pd.DataFrame(recs, index=range(len(recs)))
# --- Sum Up the Pieces by Month
dfu3 = dfu2.groupby(
['site_id', 'service_type', 'cal_year', 'cal_mo', 'item_desc', 'units']
).sum()
dfu3 = dfu3.reset_index()
#--- Make a utility function object
msg('Make an Object containing Useful Utility Functions.')
dn = settings.OTHER_DATA_DIR_PATH
ut = bu.Util(dfu, dn)
# --- Add MMBtu and Fiscal Year Information
msg('Add MMBtu Information.')
mmbtu = []
for ix, row in dfu3.iterrows():
row_mmbtu = ut.fuel_btus_per_unit(row.service_type, row.units) * row.usage / 1e6
if np.isnan(row_mmbtu): row_mmbtu = 0.0
mmbtu.append(row_mmbtu)
dfu3['mmbtu'] = mmbtu
# Now that original service types have been used to determine MMBtus,
# convert all service types to standard service types.
dfu3['service_type'] = dfu3.service_type.map(ut.service_to_category())
# This may cause multiple rows for a fiscal month and service type.
# Re-sum to reduce to least number of rows.
dfu4 = dfu3.groupby(
['site_id', 'service_type', 'cal_year', 'cal_mo', 'item_desc', 'units']
).sum()
dfu4 = dfu4.reset_index()
# Add the fiscal year information
msg('Add Fiscal Year Information.')
fyr = []
fmo = []
for cyr, cmo in zip(dfu4.cal_year, dfu4.cal_mo):
fis_yr, fis_mo = bu.calendar_to_fiscal(cyr, cmo)
fyr.append(fis_yr)
fmo.append(fis_mo)
dfu4['fiscal_year'] = fyr
dfu4['fiscal_mo'] = fmo
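# Illustrative assumption: if the fiscal year runs July through June (the actual
# convention is defined by bu.calendar_to_fiscal), calendar 2017-10 maps to
# fiscal_year 2018, fiscal_mo 4.  Whatever the convention, each calendar
# (year, month) pair maps to exactly one fiscal (year, month) pair.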
msg('Preprocessing complete!')
return dfu, dfu4, ut
#******************************************************************************
#******************************************************************************
# --------- Functions that Produce Reports for One Site ----------
""" Each of these functions returns, at a minimum, a dictionary containing
data for the report template.
The functions frequently have some or all of the following input parameters, which
are documented here:
Input parameters:
site: The Site ID of the site to analyze.
df: The preprocessed Pandas DataFrame of Utility Bill information.
ut: The bench_util.Util object that provides additional site data
needed in the benchmarking process.
The functions all save the required graphs for their respective reports to the
directory determined in the graph_util.graph_filename_url() function.
"""
# --------------------- Building Information Report -----------------------
def building_info_report(site, ut, report_date_time):
"""
'report_date_time' is a string giving the date/time this benchmarking
script was run.
"""
# This function returns all the needed info for the report, except
# the date updated
info = ut.building_info(site)
return dict(
building_info = dict(
date_updated = report_date_time,
bldg = info
)
)
# -------------------------- Energy Index Report ----------------------------
def energy_index_report(site, df, ut):
"""As well as returning template data, this function writes a spreadsheet
that summarizes values for every building. The spreadsheet is written to
'output/extra_data/site_summary_FYYYYY.xlsx'.
"""
# Start a dictionary with the main key to hold the template data
template_data = {'energy_index_comparison': {}}
# --------- Table 1, Yearly Table
# Filter down to just this site's bills and only services that
# are energy services.
energy_services = bu.missing_energy_services([])
df1 = df.query('site_id==@site and service_type==@energy_services')
# Only do this table if there are energy services.
if not df1.empty:
# Sum Energy Costs and Usage
df2 = pd.pivot_table(df1, index='fiscal_year', values=['cost', 'mmbtu'], aggfunc=np.sum)
# Add a column showing number of months present in each fiscal year.
bu.add_month_count_column(df2, df1)
# Make a column with just the Electricity MMBtu so Heat MMBtu can be computed
dfe = df1.query("service_type=='electricity'").groupby('fiscal_year').sum()[['mmbtu']]
dfe.rename(columns={'mmbtu': 'elec_mmbtu'}, inplace = True)
df2 = df2.merge(dfe, how='left', left_index=True, right_index=True)
df2['elec_mmbtu'] = df2['elec_mmbtu'].fillna(0.0)
df2['heat_mmbtu'] = df2.mmbtu - df2.elec_mmbtu
# Add in degree days to DataFrame
months_present = bu.months_present(df1)
deg_days = ut.degree_days_yearly(months_present, site)
df2['hdd'] = deg_days
# Get building square footage and calculate EUIs and ECI.
sq_ft = ut.building_info(site)['sq_ft']
df2['eui'] = df2.mmbtu * 1e3 / sq_ft
df2['eci'] = df2.cost / sq_ft
df2['specific_eui'] = df2.heat_mmbtu * 1e6 / df2.hdd / sq_ft
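# Units implied by the expressions above (mmbtu is millions of Btu, cost is
# dollars, sq_ft is square feet, hdd is heating degree-days):
#   eui          = kBtu per square foot per year
#   eci          = dollars per square foot per year
#   specific_eui = Btu per square foot per heating degree-day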
# Restrict to full years
df2 = df2.query("month_count == 12").copy()
# Reverse the years
df2.sort_index(ascending=False, inplace=True)
# get the rows as a list of dictionaries and put into
# final template data dictionary.
template_data['energy_index_comparison']['yearly_table'] = {
'rows': bu.df_to_dictionaries(df2)
}
# ---------- Table 2, Details Table
# Use the last complete year for this site as the year for the Details
# table. If there was no complete year for the site, then use the
# last complete year for the entire dataset.
if 'df2' in locals() and len(df2):
last_complete_year = df2.index.max()
else:
# Determine month count by year for Electricity in entire dataset
# to determine the latest complete year.
electric_only = df.query("service_type == 'electricity'")
electric_months_present = bu.months_present(electric_only)
electric_mo_count = bu.month_count(electric_months_present)
last_complete_year = max(electric_mo_count[electric_mo_count==12].index)
# Filter down to just the records of the targeted fiscal year
df1 = df.query('fiscal_year == @last_complete_year')
# Get Total Utility cost by building. This includes non-energy utilities as well.
df2 = df1.pivot_table(index='site_id', values=['cost'], aggfunc=np.sum)
df2.columns = ['total_cost']
# Save this into the Final DataFrame that we will build up as we go.
df_final = df2.copy()
# Get a list of the Energy Services and restrict the data to
# just these services
energy_svcs = bu.missing_energy_services([])
df2 = df1.query('service_type == @energy_svcs')
# Summarize Cost by Service Type
df3 = pd.pivot_table(df2, index='site_id', columns='service_type', values='cost', aggfunc=np.sum)
# Add in any missing columns
bu.add_missing_columns(df3, energy_svcs)
# Change column names
cols = ['{}_cost'.format(col) for col in df3.columns]
df3.columns = cols
# Add a total energy cost column
df3['total_energy_cost'] = df3.sum(axis=1)
# Add a total Heat Cost Column
df3['total_heat_cost'] = df3.total_energy_cost.fillna(0.0) - df3.electricity_cost.fillna(0.0)
# Add this to the final DataFrame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Summarize MMBtu by Service Type
df3 = pd.pivot_table(df2, index='site_id', columns='service_type', values='mmbtu', aggfunc=np.sum)
# Add in any missing columns
bu.add_missing_columns(df3, energy_svcs)
# Change column names
cols = ['{}_mmbtu'.format(col) for col in df3.columns]
df3.columns = cols
# Add a total mmbtu column
df3['total_mmbtu'] = df3.sum(axis=1)
# Add a total Heat mmbtu Column
df3['total_heat_mmbtu'] = df3.total_mmbtu.fillna(0.0) - df3.electricity_mmbtu.fillna(0.0)
# Add this to the final DataFrame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Electricity kWh summed by building
df3 = pd.pivot_table(df2.query('units == "kWh"'), index='site_id', values='usage', aggfunc=np.sum)
df3.columns = ['electricity_kwh']
# Include in Final DF
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Electricity kW, both Average and Max by building
# First, sum up kW pieces for each month.
df3 = df2.query('units == "kW"').groupby(['site_id', 'fiscal_year', 'fiscal_mo']).sum()
df3 = pd.pivot_table(df3.reset_index(), index='site_id', values='usage', aggfunc=[np.mean, np.max])
df3.columns = ['electricity_kw_average', 'electricity_kw_max']
# Add into Final Frame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Add in Square footage info
df_bldg = ut.building_info_df()[['sq_ft']]
# Add into Final Frame. I do a merge here so as not to bring
# in buildings from the building info spreadsheet that are not in this
# dataset; this dataset has been restricted to one year.
df_final = pd.merge(df_final, df_bldg, how='left', left_index=True, right_index=True)
# Build a DataFrame that has monthly degree days for each site/year/month
# combination.
combos = set(zip(df1.site_id, df1.fiscal_year, df1.fiscal_mo))
df_dd = pd.DataFrame(data=list(combos), columns=['site_id', 'fiscal_year', 'fiscal_mo'])
ut.add_degree_days_col(df_dd)
# Add up the degree days by site (we've already filtered down to one year or less
# of data.)
dd_series = df_dd.groupby('site_id').sum()['degree_days']
# Put in final DataFrame
df_final = pd.concat([df_final, dd_series], axis=1)
# Add in a column that gives the number of months present for each site
# in this year. Then filter down to just the sites that have 12 months
# of data.
df_final.reset_index(inplace=True)
df_final['fiscal_year'] = last_complete_year
df_final.set_index(['site_id', 'fiscal_year'], inplace=True)
df_final = bu.add_month_count_column_by_site(df_final, df2)
df_final = df_final.query('month_count==12').copy()
df_final.reset_index(inplace=True)
df_final.set_index('site_id', inplace=True)
# Calculate per square foot values for each building.
df_final['eui'] = df_final.total_mmbtu * 1e3 / df_final.sq_ft
df_final['eci'] = df_final.total_energy_cost / df_final.sq_ft
df_final['specific_eui'] = df_final.total_heat_mmbtu * 1e6 / df_final.sq_ft / df_final.degree_days
# Save this to a spreadsheet, if it has not already been saved
fn = 'output/extra_data/site_summary_FY{}.xlsx'.format(last_complete_year)
if not os.path.exists(fn):
excel_writer = pd.ExcelWriter(fn)
df_final.to_excel(excel_writer, sheet_name='Sites')
# Get the totals across all buildings
totals_all_bldgs = df_final.sum()
# Total Degree-Days are not relevant
totals_all_bldgs.drop(['degree_days'], inplace=True)
# Only use the set of buildings that have some energy use and non-zero
# square footage to determine EUI's and ECI's
energy_bldgs = df_final.query("total_mmbtu > 0 and sq_ft > 0")
# Get total square feet, energy use, and energy cost for these buildings
# and calculate EUI and ECI
sq_ft_energy_bldgs = energy_bldgs.sq_ft.sum()
energy_in_energy_bldgs = energy_bldgs.total_mmbtu.sum()
energy_cost_in_energy_bldgs = energy_bldgs.total_energy_cost.sum()
totals_all_bldgs['eui'] = energy_in_energy_bldgs * 1e3 / sq_ft_energy_bldgs
totals_all_bldgs['eci'] = energy_cost_in_energy_bldgs / sq_ft_energy_bldgs
# For calculating heating specific EUI, further filter the set of
# buildings down to those that have heating fuel use.
# Get separate square footage total and weighted average degree-day for these.
heat_bldgs = energy_bldgs.query("total_heat_mmbtu > 0")
heat_bldgs_sq_ft = heat_bldgs.sq_ft.sum()
heat_bldgs_heat_mmbtu = heat_bldgs.total_heat_mmbtu.sum()
heat_bldgs_degree_days = (heat_bldgs.total_heat_mmbtu * heat_bldgs.degree_days).sum() / heat_bldgs.total_heat_mmbtu.sum()
totals_all_bldgs['specific_eui'] = heat_bldgs_heat_mmbtu * 1e6 / heat_bldgs_sq_ft / heat_bldgs_degree_days
# calculate a rank DataFrame
df_rank = pd.DataFrame()
for col in df_final.columns:
df_rank[col] = df_final[col].rank(ascending=False)
if site in df_final.index:
# The site exists in the DataFrame
site_info = df_final.loc[site]
site_pct = site_info / totals_all_bldgs
site_rank = df_rank.loc[site]
else:
# Site is not there, probably because it is not present in this year.
# Make variables with NaN values for all elements.
site_info = df_final.iloc[0].copy() # Just grab the first row to start with
site_info[:] = np.NaN    # fill all elements with NaN
site_pct = site_info.copy()
site_rank = site_info.copy()
# Make a final dictionary to hold all the results for this table
tbl2_data = {
'fiscal_year': 'FY {}'.format(last_complete_year),
'bldg': site_info.to_dict(),
'all': totals_all_bldgs.to_dict(),
'pct': site_pct.to_dict(),
'rank': site_rank.to_dict()
}
template_data['energy_index_comparison']['details_table'] = tbl2_data
# -------------- Energy Comparison Graphs ---------------
# Filter down to only services that are energy services.
energy_services = bu.missing_energy_services([])
df4 = df.query('service_type==@energy_services').copy()
# Sum Energy Costs and Usage
df5 = pd.pivot_table(df4, index=['site_id', 'fiscal_year'], values=['cost', 'mmbtu'], aggfunc=np.sum)
# Add a column showing number of months present in each fiscal year.
df5 = bu.add_month_count_column_by_site(df5, df4)
# Create an Electric MMBtu column so it can be subtracted from total to determine
# Heat MMBtu.
dfe = df4.query("service_type=='electricity'").groupby(['site_id', 'fiscal_year']).sum()[['mmbtu']]   # standard category name is lowercase
dfe.rename(columns={'mmbtu': 'elec_mmbtu'}, inplace = True)
df5 = df5.merge(dfe, how='left', left_index=True, right_index=True)
df5['elec_mmbtu'] = df5['elec_mmbtu'].fillna(0.0)
df5['heat_mmbtu'] = df5.mmbtu - df5.elec_mmbtu
# Add in degree-days:
# Create a DataFrame with site, year, month and degree-days, but only one row
# for each site/year/month combo.
dfd = df4[['site_id', 'fiscal_year', 'fiscal_mo']].copy()
dfd.drop_duplicates(inplace=True)
ut.add_degree_days_col(dfd)
# Use the agg function below so that a NaN will be returned for the year
# if any monthly values are NaN
dfd = dfd.groupby(['site_id', 'fiscal_year']).agg({'degree_days': lambda x: np.sum(x.values)})[['degree_days']]
df5 = df5.merge(dfd, how='left', left_index=True, right_index=True)
# Add in some needed building info like square footage, primary function
# and building category.
df_bldg = ut.building_info_df()
# Shrink to just the needed fields and remove index.
# Also, fill blank values with 'Unknown'.
df_info = df_bldg[['sq_ft', 'site_category', 'primary_func']].copy().reset_index()
df_info['site_category'] = df_info.site_category.fillna('Unknown')
df_info['primary_func'] = df_info.primary_func.fillna('Unknown Type')
# Also Remove the index from df5 and merge in building info
df5.reset_index(inplace=True)
df5 = df5.merge(df_info, how='left')
# Now calculate per square foot energy measures
df5['eui'] = df5.mmbtu * 1e3 / df5.sq_ft
df5['eci'] = df5.cost / df5.sq_ft
df5['specific_eui'] = df5.heat_mmbtu * 1e6 / df5.degree_days / df5.sq_ft
# Restrict to full years
df5 = df5.query("month_count == 12").copy()
# Make all of the comparison graphs
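# Assumed contract of gu.graph_filename_url(site, name): it returns a
# (file_path, url) pair, where file_path is where the graph image is written
# (under the output images directory) and url is the relative link embedded in
# the HTML report.  The exact paths are determined by graph_util, not here.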
g1_fn, g1_url = gu.graph_filename_url(site, 'eci_func')
gu.building_type_comparison_graph(df5, 'eci', site, g1_fn)
g2_fn, g2_url = gu.graph_filename_url(site, 'eci_owner')
gu.building_owner_comparison_graph(df5, 'eci', site, g2_fn)
g3_fn, g3_url = gu.graph_filename_url(site, 'eui_func')
gu.building_type_comparison_graph(df5, 'eui', site, g3_fn)
g4_fn, g4_url = gu.graph_filename_url(site, 'eui_owner')
gu.building_owner_comparison_graph(df5, 'eui', site, g4_fn)
g5_fn, g5_url = gu.graph_filename_url(site, 'speui_func')
gu.building_type_comparison_graph(df5, 'specific_eui', site, g5_fn)
g6_fn, g6_url = gu.graph_filename_url(site, 'speui_owner')
gu.building_owner_comparison_graph(df5, 'specific_eui', site, g6_fn)
template_data['energy_index_comparison']['graphs'] = [
g1_url, g2_url, g3_url, g4_url, g5_url, g6_url
]
return template_data
# ------------------ Utility Cost Overview Report ----------------------
def utility_cost_report(site, df, ut):
"""As well as return the template data, this function returns a utility cost
DataFrame that is needed in the Heating Cost Analysis Report.
"""
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'cost']]
# Summarize cost by fiscal year and service type.
df2 = pd.pivot_table(
df1,
values='cost',
index=['fiscal_year'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
missing_services = bu.missing_services(df2.columns)
bu.add_columns(df2, missing_services)
# Add a Total column that sums the other columns
df2['total'] = df2.sum(axis=1)
# Add a percent change column
df2['pct_change'] = df2.total.pct_change()
# Add in degree days
months_present = bu.months_present(df1)
deg_days = ut.degree_days_yearly(months_present, site)
df2['hdd'] = deg_days
# Add in a column to show the numbers of months present for each year
# This will help to identify partial years.
bu.add_month_count_column(df2, df1)
# trim out the partial years
if len(df2):
df2 = df2.query("month_count == 12").copy()
# Reverse the DataFrame
df2.sort_index(ascending=False, inplace=True)
# Reset the index so the fiscal year column can be passed to the graphing utility
reset_df2 = df2.reset_index()
# Save a copy of this DataFrame to return for use in the
# Heating Cost Analysis Report
df_utility_cost = reset_df2.copy()
# Get appropriate file names and URLs for the graph
g1_fn, g1_url = gu.graph_filename_url(site, 'util_cost_ovw_g1')
# make the area cost distribution graph
utility_list = bu.all_services.copy()
gu.area_cost_distribution(reset_df2, 'fiscal_year', utility_list, g1_fn);
# make the stacked bar graph
g2_fn, g2_url = gu.graph_filename_url(site, 'util_cost_ovw_g2')
gu.create_stacked_bar(reset_df2, 'fiscal_year', utility_list, 'Utility Cost ($)', "Annual Cost by Utility Type",g2_fn)
# Put results into the final dictionary that will be passed to the Template.
# A function is used to convert the DataFrame into a list of dictionaries.
template_data = dict(
utility_cost_overview = dict(
graphs=[g1_url, g2_url],
table={'rows': bu.df_to_dictionaries(df2)}
)
)
return template_data, df_utility_cost
# -------------------- Energy Use and Cost Reports -----------------------
def energy_use_cost_reports(site, df, ut, df_utility_cost):
"""This does both the Energy Usage report and the Energy Cost & Usage
Pie charts.
'df_utility_cost' is a summary utility cost DataFrame from the prior
function.
As well as returning the template data, this function returns a summary
energy usage dataframe.
"""
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
usage_df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'mmbtu']]
# Total mmbtu by service type and year.
usage_df2 = pd.pivot_table(
usage_df1,
values='mmbtu',
index=['fiscal_year'],
columns=['service_type'],
aggfunc=np.sum
)
# drop non-energy columns
non_energy_services = list(set(bu.all_services) - set(bu.all_energy_services))
usage_df2 = usage_df2[usage_df2.columns.difference(non_energy_services)]
# Add in columns for the missing services
missing_services = bu.missing_energy_services(usage_df2.columns)
bu.add_columns(usage_df2, missing_services)
# Add a Total column that sums the other columns
usage_df2['total_energy'] = usage_df2.sum(axis=1)
cols = ['{}_mmbtu'.format(col) for col in usage_df2.columns]
usage_df2.columns = cols
# Create a list of columns to loop through and calculate percent total energy
usage_cols = list(usage_df2.columns.values)
usage_cols.remove('total_energy_mmbtu')
for col in usage_cols:
col_name = col.split('_mmbtu')[0] + "_pct"
usage_df2[col_name] = usage_df2[col] / usage_df2.total_energy_mmbtu
# Add in degree days
months_present = bu.months_present(usage_df1)
deg_days = ut.degree_days_yearly(months_present, site)
usage_df2['hdd'] = deg_days
# Add in a column to show the numbers of months present for each year
# This will help to identify partial years.
mo_count = bu.month_count(months_present)
usage_df2['month_count'] = mo_count
# Calculate total heat energy and normalized heating usage
usage_df2['total_heat_mmbtu'] = usage_df2.total_energy_mmbtu - usage_df2.electricity_mmbtu
usage_df2['total_specific_heat'] = usage_df2.total_heat_mmbtu * 1000 / usage_df2.hdd
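# Units note: total_heat_mmbtu * 1000 / hdd expresses normalized heating use in
# kBtu per heating degree-day.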
usage_df2 = usage_df2.query("month_count == 12").copy()
# Reverse the DataFrame
usage_df2.sort_index(ascending=False, inplace=True)
usage_df2 = usage_df2.drop('month_count', axis=1)
# --- Create Energy Usage Overview Graphs
# Reset the index so the fiscal year column can be passed to the graphing function
reset_usage_df2 = usage_df2.reset_index()
p4g2_filename, p4g2_url = gu.graph_filename_url(site, 'energy_usage_ovw_g2')
# Create the area graph
gu.area_use_distribution(reset_usage_df2, 'fiscal_year', usage_cols, p4g2_filename)
# The stacked bar graph
p4g1_filename, p4g1_url = gu.graph_filename_url(site, 'energy_usage_ovw_g1')
gu.energy_use_stacked_bar(reset_usage_df2, 'fiscal_year', usage_cols, p4g1_filename)
# Convert df to dictionary
energy_use_overview_rows = bu.df_to_dictionaries(usage_df2)
# Put data and graphs into a dictionary
template_data = dict(
energy_usage_overview = dict(
graphs=[p4g1_url, p4g2_url],
table={'rows': energy_use_overview_rows}
)
)
# Make a utility list to include only energy-related columns
utility_list = bu.all_energy_services.copy()
pie_urls = gu.usage_pie_charts(usage_df2.fillna(0.0), usage_cols, 1, 'energy_usage_pie', site)
# Make the other graphs and append the URLs
df_ut_cost = df_utility_cost.set_index('fiscal_year') # need fiscal_year index for graphs
pie_urls += gu.usage_pie_charts(df_ut_cost.fillna(0.0),
utility_list,
2,
'energy_cost_pie',
site)
# Add pie charts to template dictionary
template_data['energy_cost_usage'] = dict(graphs=pie_urls)
return template_data, usage_df2
# -------------------- Electrical Usage and Cost Reports -------------------------
def electrical_usage_and_cost_reports(site, df):
"""This does both the Electrical Usage and Electrical
Cost reports."""
site_df = df.query("site_id == @site")
electric_df = site_df.query("units == 'kWh' or units == 'kW'")
if 'electricity' in site_df.service_type.unique() and site_df.query("service_type == 'electricity'")['usage'].sum(axis=0) > 0:
# only look at electricity records
electric_pivot_monthly = pd.pivot_table(electric_df,
index=['fiscal_year', 'fiscal_mo'],
columns=['units'],
values='usage',
aggfunc=np.sum)
else:
# Create an empty dataframe with the correct index
electric_pivot_monthly = site_df.groupby(['fiscal_year', 'fiscal_mo']).mean()[[]]
# Add in missing electricity columns and fill them with zeros
electric_pivot_monthly = bu.add_missing_columns(electric_pivot_monthly, ['kWh', 'kW'])
electric_pivot_monthly['kW'] = electric_pivot_monthly.kW.fillna(0.0)    # assign back; fillna alone does not modify the frame
electric_pivot_monthly['kWh'] = electric_pivot_monthly.kWh.fillna(0.0)
# Do a month count for the electricity bills
elec_months_present = bu.months_present(electric_pivot_monthly.reset_index())
elec_mo_count = bu.month_count(elec_months_present)
elec_mo_count_df = pd.DataFrame(elec_mo_count)
elec_mo_count_df.index.name = 'fiscal_year'
if 'kWh' in site_df.units.unique() or 'kW' in site_df.units.unique():
electric_pivot_annual = pd.pivot_table(electric_df,
index=['fiscal_year'],
columns=['units'],
values='usage',
aggfunc=np.sum
)
else:
# Create an empty dataframe with the correct index
electric_pivot_annual = site_df.groupby(['fiscal_year']).mean()[[]]
electric_pivot_annual = bu.add_missing_columns(electric_pivot_annual, ['kWh', 'kW'])
electric_use_annual = electric_pivot_annual[['kWh']]
electric_use_annual = electric_use_annual.rename(columns={'kWh':'ann_electric_usage_kWh'})
# Get average annual demand usage
electric_demand_avg = electric_pivot_monthly.groupby(['fiscal_year']).mean()
electric_demand_avg = electric_demand_avg[['kW']]
electric_demand_avg = electric_demand_avg.rename(columns={'kW': 'avg_demand_kW'})
# Find annual maximum demand usage
electric_demand_max = electric_pivot_monthly.groupby(['fiscal_year']).max()
electric_demand_max = electric_demand_max[['kW']]
electric_demand_max = electric_demand_max.rename(columns={'kW': 'max_demand_kW'})
# Combine dataframes
electric_demand_join = pd.merge(electric_demand_max, electric_demand_avg, how='outer', left_index=True, right_index=True)
annual_electric_data = pd.merge(electric_demand_join, electric_use_annual, how='outer', left_index=True, right_index=True)
# Add percent change columns
annual_electric_data['usage_pct_change'] = annual_electric_data.ann_electric_usage_kWh.pct_change()
annual_electric_data['avg_demand_pct_change'] = annual_electric_data.avg_demand_kW.pct_change()
annual_electric_data['max_demand_pct_change'] = annual_electric_data.max_demand_kW.pct_change()
annual_electric_data = annual_electric_data.rename(columns={'avg_demand_kW': 'Average kW',
'ann_electric_usage_kWh': 'Total kWh'})
annual_electric_data = pd.merge(annual_electric_data, elec_mo_count_df, left_index=True, right_index=True, how='left')
annual_electric_data = annual_electric_data.query("month == 12")
annual_electric_data = annual_electric_data.sort_index(ascending=False)
annual_electric_data = annual_electric_data.rename(columns={'max_demand_kW':'kw_max',
'Average kW':'kw_avg',
'Total kWh':'kwh',
'usage_pct_change':'kwh_pct_change',
'avg_demand_pct_change':'kw_avg_pct_change',
'max_demand_pct_change':'kw_max_pct_change'})
annual_electric_data = annual_electric_data.drop('month', axis=1)
# ---- Create Electrical Usage Analysis Graphs - Page 6
# Axes labels
ylabel1 = 'Electricity Usage [kWh]'
ylabel2 = 'Electricity Demand [kW]'
p6g1_filename, p6g1_url = gu.graph_filename_url(site, "electricity_usage_g1")
gu.stacked_bar_with_line(annual_electric_data.reset_index(), 'fiscal_year', ['kwh'], 'kw_avg',
ylabel1, ylabel2, "Annual Electricity Usage and Demand", p6g1_filename)
p6g2_filename, p6g2_url = gu.graph_filename_url(site, "electricity_usage_g2")
gu.create_monthly_profile(electric_pivot_monthly, 'kWh', 'Monthly Electricity Usage Profile [kWh]', 'blue',
"Monthly Electricity Usage Profile by Fiscal Year",p6g2_filename)
# Convert df to dictionary
electric_use_rows = bu.df_to_dictionaries(annual_electric_data)
# Put data and graphs in a dictionary
template_data = dict(
electrical_usage_analysis = dict(
graphs=[p6g1_url, p6g2_url],
table={'rows': electric_use_rows}
)
)
# only look at electricity records
electric_cost_df = site_df.query("service_type == 'electricity'").copy()
# Costs don't always have units, so split the data into demand charges and usage charges (which includes other charges)
electric_cost_df['cost_categories'] = np.where(electric_cost_df.item_desc.isin(['KW Charge', 'On peak demand', 'Demand Charge']),
'demand_cost', 'usage_cost')
if 'electricity' in site_df.service_type.unique():
# Sum costs by demand and usage
electric_annual_cost = pd.pivot_table(electric_cost_df,
index=['fiscal_year'],
columns=['cost_categories'],
values='cost',
aggfunc=np.sum
)
else:
electric_annual_cost = site_df.groupby(['fiscal_year']).mean()[[]]
electric_annual_cost = bu.add_missing_columns(electric_annual_cost, ['demand_cost', 'usage_cost'] ,0.0)
# Create a total column
electric_annual_cost['Total Cost'] = electric_annual_cost[['demand_cost', 'usage_cost']].sum(axis=1)
# Add percent change columns
electric_annual_cost['usage_cost_pct_change'] = electric_annual_cost.usage_cost.pct_change()
electric_annual_cost['demand_cost_pct_change'] = electric_annual_cost.demand_cost.pct_change()
electric_annual_cost['total_cost_pct_change'] = electric_annual_cost['Total Cost'].pct_change()
# Left join the cost data to the annual electric data, which only shows complete years
electric_use_and_cost = pd.merge(annual_electric_data, electric_annual_cost, left_index=True, right_index=True, how='left')
electric_use_and_cost = electric_use_and_cost.sort_index(ascending=False)
electric_use_and_cost = electric_use_and_cost.drop(['kw_max', 'kw_max_pct_change'], axis=1)
electric_use_and_cost = electric_use_and_cost.rename(columns={'demand_cost':'kw_avg_cost',
'usage_cost':'kwh_cost',
'Total Cost':'total_cost',
'usage_cost_pct_change':'kwh_cost_pct_change',
'demand_cost_pct_change':'kw_avg_cost_pct_change'
})
# --- Create Electrical Cost Analysis Graphs
p7g1_filename, p7g1_url = gu.graph_filename_url(site, "electrical_cost_g1")
renamed_use_and_cost = electric_use_and_cost.rename(columns={'kwh_cost':'Electricity Usage Cost [$]',
'kw_avg_cost':'Electricity Demand Cost [$]'})
gu.create_stacked_bar(renamed_use_and_cost.reset_index(), 'fiscal_year', ['Electricity Usage Cost [$]',
'Electricity Demand Cost [$]'],
'Electricity Cost [$]', "Annual Electricity Usage and Demand Costs", p7g1_filename)
# Create Monthly Profile of Electricity Demand
p7g2_filename, p7g2_url = gu.graph_filename_url(site, "electrical_cost_g2")
gu.create_monthly_profile(electric_pivot_monthly, 'kW', 'Monthly Electricity Demand Profile [kW]', 'blue',
"Monthly Electricity Demand Profile by Fiscal Year",p7g2_filename)
# Convert df to dictionary
electric_cost_rows = bu.df_to_dictionaries(electric_use_and_cost)
# Add data and graphs to main dictionary
template_data['electrical_cost_analysis'] = dict(
graphs=[p7g1_url, p7g2_url],
table={'rows': electric_cost_rows},
)
return template_data
# --------------------Heating Usage and Cost Reports ------------------------
def heating_usage_cost_reports(site, df, ut, df_utility_cost, df_usage):
'''This produces both the Heating Usage and the Heating Cost
reports.
'df_utility_cost': The utility cost DataFrame produced in the
utility_cost_report function above.
'df_usage': A summary energy usage DataFrame produced in the prior
energy_use_cost_reports function.
'''
# Abort if no heating usage
if df_usage.empty:
return {}
heat_service_mmbtu_list = []
for heat_service in bu.all_heat_services:
heat_service_mmbtu_list.append(heat_service + '_mmbtu')
keep_cols_list = heat_service_mmbtu_list + ['hdd', 'total_heat_mmbtu']
heating_usage = df_usage[keep_cols_list].copy()
# Add in percent change columns
# First sort so the percent change column is correct and then re-sort the other direction
heating_usage.sort_index(ascending=True, inplace=True)
for heating_service in heat_service_mmbtu_list:
new_col_name = heating_service.split('_mmbtu')[0] + '_pct_change'
heating_usage[new_col_name] = heating_usage[heating_service].pct_change()
heating_usage['total_heat_pct_change'] = heating_usage.total_heat_mmbtu.pct_change()
# Now reset the sorting
heating_usage.sort_index(ascending=False, inplace=True)
# Get the number of gallons, ccf, and cords of wood by converting MMBTUs using the supplied conversions
# This is hard-coded because I couldn't figure out how to do it more generically
heating_usage['fuel_oil_usage'] = heating_usage.fuel_oil_mmbtu * 1000000 / ut.service_category_info('fuel_oil')[1]
heating_usage['natural_gas_usage'] = heating_usage.natural_gas_mmbtu * 1000000 / ut.service_category_info('natural_gas')[1]
heating_usage['propane_usage'] = heating_usage.propane_mmbtu * 1000000 / ut.service_category_info('propane')[1]
heating_usage['wood_usage'] = heating_usage.wood_mmbtu * 1000000 / ut.service_category_info('wood')[1]
heating_usage['coal_usage'] = heating_usage.coal_mmbtu * 1000000 / ut.service_category_info('coal')[1]
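# Assumed contract of ut.service_category_info(service): the second element
# (index 1) is the Btu content of one purchased unit of that fuel.  With a
# typical (illustrative) heating value of about 135,000 Btu per gallon of fuel
# oil, a year with fuel_oil_mmbtu = 270 converts to roughly
# 270 * 1,000,000 / 135,000 = 2,000 gallons.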
# ----- Create Heating Usage Analysis Graphs
p8g1_filename, p8g1_url = gu.graph_filename_url(site, "heating_usage_g1")
gu.stacked_bar_with_line(heating_usage.reset_index(), 'fiscal_year', heat_service_mmbtu_list, 'hdd',
'Heating Fuel Usage [MMBTU/yr]', 'Heating Degree Days [Base 65F]',
"Annual Heating Energy Use and Degree Day Comparison", p8g1_filename)
# --- Create Monthly Heating Usage dataframe for graph
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
usage_df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'mmbtu']]
monthly_heating = pd.pivot_table(usage_df1,
values='mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating.columns)
bu.add_columns(monthly_heating, missing_services)
# Use only heat services
monthly_heating = monthly_heating[bu.all_heat_services]
# Create a total heating column
monthly_heating['total_heating_energy'] = monthly_heating.sum(axis=1)
p8g2_filename, p8g2_url = gu.graph_filename_url(site, "heating_usage_g2")
gu.create_monthly_profile(monthly_heating, 'total_heating_energy', "Monthly Heating Energy Profile [MMBTU]", 'red',
"Monthly Heating Energy Usage Profile by Fiscal Year", p8g2_filename)
# Convert df to dictionary
heating_use_rows = bu.df_to_dictionaries(heating_usage)
# Add data and graphs to a dictionary
template_data = dict(
heating_usage_analysis = dict(
graphs=[p8g1_url, p8g2_url],
table={'rows': heating_use_rows}
)
)
# Using the Utility Cost DataFrame passed in as a parameter,
# Put DataFrame back into ascending order, as we need to calculate
# a percent change column.
# Index is NOT Years
df_utility_cost.sort_values('fiscal_year', ascending=True, inplace=True)
# Make a total heat cost column and its percent change
df_utility_cost['total_heat_cost'] = df_utility_cost[bu.all_heat_services].sum(axis=1)
df_utility_cost['total_heat_cost_pct_change'] = df_utility_cost.total_heat_cost.pct_change()
# Now back in descending order
df_utility_cost.sort_values('fiscal_year', ascending=False, inplace=True)
cols_to_keep = bu.all_heat_services + ['fiscal_year', 'total_heat_cost','total_heat_cost_pct_change']
# Use only necessary columns
heating_cost = df_utility_cost[cols_to_keep]
cost_cols = [col + "_cost" for col in bu.all_heat_services]
cost_col_dict = dict(zip(bu.all_heat_services, cost_cols))
# Change column names so they aren't the same as the heating usage dataframe
heating_cost = heating_cost.rename(columns=cost_col_dict)
# Combine the heating cost and heating use dataframes
heating_cost_and_use = pd.merge(heating_cost, heating_usage, left_on='fiscal_year', right_index=True, how='right')
# Put DataFrame in ascending order to calculate percent change
heating_cost_and_use.sort_values('fiscal_year', ascending=True, inplace=True)
# This will be used to shorten final dataframe
final_cost_col_list = list(cost_cols)
# Create percent change columns
for col in cost_cols:
new_col = col.split('_cost')[0] + '_pct_change'
heating_cost_and_use[new_col] = heating_cost_and_use[col].pct_change()
final_cost_col_list.append(new_col)
# Back to descending order
heating_cost_and_use.sort_values('fiscal_year', ascending=False, inplace=True)
# Create unit cost columns
for col in cost_cols:
n_col = col.split('_cost')[0] + '_unit_cost'
mmbtu_col = col.split('_cost')[0] + '_mmbtu'
heating_cost_and_use[n_col] = heating_cost_and_use[col] / heating_cost_and_use[mmbtu_col]
final_cost_col_list.append(n_col)
heating_cost_and_use['building_heat_unit_cost'] = heating_cost_and_use.total_heat_cost / heating_cost_and_use.total_heat_mmbtu
# Remove all columns not needed for the Heating Cost Analysis Table
final_cost_col_list = final_cost_col_list + ['fiscal_year','building_heat_unit_cost',
'total_heat_cost','total_heat_cost_pct_change']
heating_cost_and_use = heating_cost_and_use[final_cost_col_list]
# ---- Create DataFrame with the Monthly Average Price Per MMBTU for All Sites
# Filter out natural gas customer charges as the unit cost goes to infinity if there is a charge but no use
df_no_gas_cust_charges = df.drop(df[(df['service_type'] == 'natural_gas') & (df['units'] != 'CCF')].index)
# Filter out records with zero usage, which correspond to things like customer charges, etc.
nonzero_usage = df_no_gas_cust_charges.query("usage > 0")
nonzero_usage = nonzero_usage.query("mmbtu > 0")
# Filter out zero cost or less records (these are related to waste oil)
nonzero_usage = nonzero_usage.query("cost > 0")
# Get the total fuel cost and usage for all buildings by year and month
grouped_nonzero_usage = nonzero_usage.groupby(['service_type', 'fiscal_year', 'fiscal_mo']).sum()
# Divide the total cost for all building by the total usage for all buildings so that the average is weighted correctly
grouped_nonzero_usage['avg_price_per_mmbtu'] = grouped_nonzero_usage.cost / grouped_nonzero_usage.mmbtu
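# Worked example of why the summed totals are divided, rather than averaging each
# building's own unit cost: two buildings with (cost=$100, 10 MMBtu) and
# (cost=$300, 50 MMBtu) give a usage-weighted price of 400 / 60 = $6.67/MMBtu,
# whereas a simple mean of their unit prices ($10 and $6) would overstate it as $8.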
# Get only the desired outcome, price per million BTU for each fuel type, and the number of calendar months it is based on
# i.e. the number of months of bills for each fuel for all buildings for that particular month.
grouped_nonzero_usage = grouped_nonzero_usage[['avg_price_per_mmbtu', 'cal_mo']]
# Drop electricity from the dataframe.
grouped_nonzero_usage = grouped_nonzero_usage.reset_index()
grouped_nonzero_heatfuel_use = grouped_nonzero_usage.query("service_type != 'electricity'")   # standard category name is lowercase
# Create a column for each service type
grouped_nonzero_heatfuel_use = pd.pivot_table(grouped_nonzero_heatfuel_use,
values='avg_price_per_mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns='service_type'
)
grouped_nonzero_heatfuel_use = grouped_nonzero_heatfuel_use.reset_index()
# --- Monthly Cost Per MMBTU: Data and Graphs
# Exclude other charges from the natural gas costs. This is because the unit costs for natural gas go to infinity
# when there is zero usage but a customer charge
cost_df1 = df.drop(df[(df['service_type'] == 'natural_gas') & (df['units'] != 'CCF')].index)
# Create cost dataframe for given site from processed data
cost_df1 = cost_df1.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'cost']]
# Split out by service type
monthly_heating_cost = pd.pivot_table(cost_df1,
values='cost',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating_cost.columns)
bu.add_columns(monthly_heating_cost, missing_services)
monthly_heating_cost = monthly_heating_cost[bu.all_heat_services]
# Create a total heating column
monthly_heating_cost['total_heating_cost'] = monthly_heating_cost.sum(axis=1)
monthly_heating_cost = monthly_heating_cost.rename(columns=cost_col_dict)
monthly_heat_energy_and_use = pd.merge(monthly_heating_cost, monthly_heating, left_index=True, right_index=True, how='outer')
# Create unit cost columns in $ / MMBTU for each fuel type
for col in cost_cols:
n_col_name = col.split('_cost')[0] + "_unit_cost"
use_col_name = col.split('_cost')[0]
monthly_heat_energy_and_use[n_col_name] = monthly_heat_energy_and_use[col] / monthly_heat_energy_and_use[use_col_name]
monthly_heat_energy_and_use['building_unit_cost'] = monthly_heat_energy_and_use.total_heating_cost / monthly_heat_energy_and_use.total_heating_energy
# Reset the index for easier processing
monthly_heat_energy_and_use = monthly_heat_energy_and_use.reset_index()
# Add in unit costs for fuels that are currently blank
# Get only columns that exist in the dataframe
available_service_list = list(grouped_nonzero_heatfuel_use.columns.values)
heat_services_in_grouped_df = list(set(bu.all_heat_services) & set(available_service_list))
unit_cost_cols = [col + "_unit_cost" for col in heat_services_in_grouped_df]
service_types = [col + "_avg_unit_cost" for col in heat_services_in_grouped_df]
unit_cost_dict = dict(zip(unit_cost_cols,service_types))
# Add in average unit costs calculated from all sites for each month
monthly_heat_energy_and_use = pd.merge(monthly_heat_energy_and_use, grouped_nonzero_heatfuel_use,
left_on=['fiscal_year', 'fiscal_mo'], right_on=['fiscal_year', 'fiscal_mo'],
how='left', suffixes=('', '_avg_unit_cost'))
# Check each column to see if it is NaN (identified when the value does not equal itself) and if it is, fill with the average
# price per MMBTU taken from all sites
for col, service in unit_cost_dict.items():
monthly_heat_energy_and_use[col] = np.where(monthly_heat_energy_and_use[col] != monthly_heat_energy_and_use[col],
monthly_heat_energy_and_use[service],
monthly_heat_energy_and_use[col])
# Add calendar year and month columns
cal_year = []
cal_mo = []
for fiscal_year, fiscal_mo in zip(monthly_heat_energy_and_use.fiscal_year, monthly_heat_energy_and_use.fiscal_mo):
CalYear, CalMo = bu.fiscal_to_calendar(fiscal_year, fiscal_mo)
cal_year.append(CalYear)
cal_mo.append(CalMo)
monthly_heat_energy_and_use['calendar_year'] = cal_year
monthly_heat_energy_and_use['calendar_mo'] = cal_mo
# Create a date column using the calendar year and month to pass to the graphing function
def get_date(row):
return datetime.date(year=row['calendar_year'], month=row['calendar_mo'], day=1)
monthly_heat_energy_and_use['date'] = monthly_heat_energy_and_use[['calendar_year','calendar_mo']].apply(get_date, axis=1)
p9g1_filename, p9g1_url = gu.graph_filename_url(site, "heating_cost_g1")
gu.fuel_price_comparison_graph(monthly_heat_energy_and_use, 'date', unit_cost_cols, 'building_unit_cost', p9g1_filename)
# --- Realized Savings from Fuel Switching: Page 9, Graph 2
# Create an indicator for whether a given heating fuel is available for the facility. This is done by checking the
# usage across all months; if it is zero, that building doesn't have the option to use that type of fuel.
for col in bu.all_heat_services:
new_col_name = col + "_available"
monthly_heat_energy_and_use[new_col_name] = np.where(monthly_heat_energy_and_use[col].sum() == 0, 0, 1)
# Calculate what it would have cost if the building used only one fuel type
available_cols = []
unit_cost_cols_2 = []
for col in bu.all_heat_services:
available_cols.append(col + "_available")
unit_cost_cols_2.append(col + "_unit_cost")
available_dict = dict(zip(unit_cost_cols_2, available_cols))
hypothetical_cost_cols = []
for unit_cost, avail_col in available_dict.items():
new_col_name = unit_cost + "_hypothetical"
hypothetical_cost_cols.append(new_col_name)
monthly_heat_energy_and_use[new_col_name] = monthly_heat_energy_and_use[unit_cost] * monthly_heat_energy_and_use.total_heating_energy * monthly_heat_energy_and_use[avail_col]
# Calculate the monthly savings to the building by not using the most expensive available fuel entirely
monthly_heat_energy_and_use['fuel_switching_savings'] = monthly_heat_energy_and_use[hypothetical_cost_cols].max(axis=1) - monthly_heat_energy_and_use.total_heating_cost
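# Worked example: if the most expensive fuel available to the building would have
# cost $1,200 to supply this month's total heating energy, and the actual
# (possibly mixed-fuel) heating cost was $900, then fuel_switching_savings for
# the month is 1200 - 900 = $300.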
# Sort dataframe to calculate cumulative value
monthly_heat_energy_and_use = monthly_heat_energy_and_use.sort_values(by='date', ascending=True)
# Calculate cumulative value
monthly_heat_energy_and_use['cumulative_fuel_switching_savings'] = np.cumsum(monthly_heat_energy_and_use.fuel_switching_savings)
p9g2_filename, p9g2_url = gu.graph_filename_url(site, "heating_cost_g2")
gu.create_monthly_line_graph(monthly_heat_energy_and_use, 'date', 'cumulative_fuel_switching_savings',
'Cumulative Fuel Switching Savings Realized [$]', p9g2_filename)
# Convert df to dictionary
heating_cost_rows = bu.df_to_dictionaries(heating_cost_and_use)
# Add data and graphs to main dictionary
template_data['heating_cost_analysis'] = dict(
graphs=[p9g1_url, p9g2_url],
table={'rows': heating_cost_rows},
)
return template_data
# ---------------------- Water Analysis Table ---------------------------
def water_report(site, df):
water_use = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo','cost', 'usage', 'units']]
# Abort if no data
if water_use.empty:
return {}
# Create month count field for all months that have water and sewer bills
water_use_only = water_use.query("service_type == 'water'")
water_months_present = bu.months_present(water_use_only)
water_mo_count = bu.month_count(water_months_present)
# Create annual water gallon usage dataframe
water_gal_df = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
gal_missing_services = bu.missing_services(water_gal_df.columns)
bu.add_columns(water_gal_df, gal_missing_services)
# Use only required columns
water_gal_df = water_gal_df[['water']]
# Calculate percent change column
water_gal_df['water_use_pct_change'] = water_gal_df.water.pct_change()
# Create annual water and sewer cost dataframe
water_cost_df = pd.pivot_table(water_use,
values='cost',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
water_missing_services = bu.missing_services(water_cost_df.columns)
bu.add_columns(water_cost_df, water_missing_services)
# Calculate totals, percent change
cols_to_remove = bu.all_energy_services + ['refuse']
water_cost_df = water_cost_df[water_cost_df.columns.difference(cols_to_remove)]
rename_dict = {'sewer': 'Sewer Cost',
'water': 'Water Cost'}
water_cost_df = water_cost_df.rename(columns=rename_dict)
# Sum the water and sewer columns to get the total cost (missing service columns were added above)
water_cost_df['total_water_sewer_cost'] = water_cost_df.sum(axis=1)
water_cost_df['water_cost_pct_change'] = water_cost_df['Water Cost'].pct_change()
water_cost_df['sewer_cost_pct_change'] = water_cost_df['Sewer Cost'].pct_change()
water_cost_df['total_water_sewer_cost_pct_change'] = water_cost_df.total_water_sewer_cost.pct_change()
# Merge use and cost dataframes
water_use_and_cost = pd.merge(water_cost_df, water_gal_df, left_index=True, right_index=True, how='left')
water_use_and_cost['water_unit_cost'] = water_use_and_cost.total_water_sewer_cost / water_use_and_cost.water
water_use_and_cost['water_unit_cost_pct_change'] = water_use_and_cost.water_unit_cost.pct_change()
# Use only complete years
water_use_and_cost['month_count'] = water_mo_count
if len(water_use_and_cost):
water_use_and_cost = water_use_and_cost.query("month_count == 12")
water_use_and_cost = water_use_and_cost.drop('month_count', axis=1)
water_use_and_cost = water_use_and_cost.sort_index(ascending=False)
water_use_and_cost = water_use_and_cost.rename(columns={'Sewer Cost':'sewer_cost',
'Water Cost':'water_cost',
'total_water_sewer_cost':'total_cost',
'total_water_sewer_cost_pct_change':'total_cost_pct_change',
'water':'total_usage',
'water_use_pct_change':'total_usage_pct_change',
'water_unit_cost':'total_unit_cost',
'water_unit_cost_pct_change':'total_unit_cost_pct_change'
})
# ---- Create Water Cost Stacked Bar Graph - Page 10 Graph 1
p10g1_filename, p10g1_url = gu.graph_filename_url(site, "water_analysis_g1")
gu.create_stacked_bar(water_use_and_cost.reset_index(), 'fiscal_year', ['sewer_cost', 'water_cost'],
'Utility Cost [$]', "Annual Water and Sewer Costs", p10g1_filename)
# ---- Create Monthly Water Profile Graph
# Create monthly water gallon dataframe
water_gal_df_monthly = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
p10g2_filename, p10g2_url = gu.graph_filename_url(site, "water_analysis_g2")
if 'water' in list(water_gal_df_monthly.columns.values):
gu.create_monthly_profile(water_gal_df_monthly, 'water', 'Monthly Water Usage Profile [gallons]', 'green',
"Monthly Water Usage Profile by Fiscal Year", p10g2_filename)
else:
shutil.copyfile(os.path.abspath('no_data_available.png'), os.path.abspath(p10g2_filename))
# Convert df to dictionary
water_rows = bu.df_to_dictionaries(water_use_and_cost)
# Return data and graphs in a dictionary
return dict(
water_analysis = dict(
graphs=[p10g1_url, p10g2_url],
table={'rows': water_rows}
)
)
#******************************************************************************
#******************************************************************************
# ----------------------------- Misc Functions --------------------------------
# Time when the script started running. Used to determine cumulative time
start_time = None
def msg(the_message):
"""Prints a message to the console, along cumulative elapsed time
since the script started.
"""
print('{} ({:.1f} s)'.format(the_message, time.time() - start_time))
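# Example: msg('Loaded billing records') prints something like
#   Loaded billing records (12.3 s)
# where 12.3 s is the elapsed time since start_time was set in the main script.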
#*****************************************************************************
#*****************************************************************************
# ----------------------------- Main Script -----------------------------------
if __name__=="__main__":
# Save the time when the script started, so cumulative times can be
# shown in messages printed to the console.
start_time = time.time()
msg('Benchmarking Script starting!')
# Get a Date/Time String for labeling this report
report_date_time = datetime.datetime.now().strftime('%B %d, %Y %I:%M %p')
# Read and Preprocess the data in the Utility Bill file, acquiring
# a DataFrame of preprocessed data and a utility function object that is
# needed by the analysis routines.
if settings.USE_DATA_FROM_LAST_RUN:
# Read the data from the pickle files that were created during the
# last run of the script.
df = pickle.load(open('df_processed.pkl', 'rb'))
util_obj = pickle.load(open('util_obj.pkl', 'rb'))
msg('Data from Last Run has been loaded.')
else:
# Run the full reading and processing routine
df_raw, df, util_obj = preprocess_data()
# Pickle the DataFrames and utility object for fast
# loading later, if needed
df_raw.to_pickle('df_raw.pkl')
df.to_pickle('df_processed.pkl')
pickle.dump(util_obj, open('util_obj.pkl', 'wb'))
# We no longer need the raw DataFrame, so delete it to
# save memory
del df_raw
# Clean out the output directories to prepare for the new report files
out_dirs = [
'output/debug',
'output/extra_data',
'output/images',
'output/sites'
]
for out_dir in out_dirs:
for fn in glob.glob(os.path.join(out_dir, '*')):
if not 'placeholder' in fn: # don't delete placeholder file
os.remove(fn)
# Create Index (Home) page
site_cats = util_obj.site_categories_and_buildings()
template_data = dict(
date_updated = report_date_time,
categories = site_cats
)
ix_template = template_util.get_template('index.html')
result = ix_template.render(template_data)
open('output/index.html', 'w').write(result)
# ------ Loop through the sites, creating a report for each
# Get the template used to create the site benchmarking report.
site_template = template_util.get_template('sites/index.html')
site_count = 0      # tracks the number of sites processed
for site_id in util_obj.all_sites():
# This line shortens the calculation process to start with whatever
# Site ID you want to start with
# if site_id < '1187': continue
msg("Site '{}' is being processed...".format(site_id))
# Gather template data from each of the report sections. The functions
# return a dictionary with variables needed by the template. Sometimes other
# values are returned from the function, often for use in later reports.
template_data = building_info_report(site_id, util_obj, report_date_time)
report_data = energy_index_report(site_id, df, util_obj)
template_data.update(report_data)
report_data, df_utility_cost = utility_cost_report(site_id, df, util_obj)
template_data.update(report_data)
# Filter down to just this site's bills and only services that
# are energy services in order to determine whether there are any
# energy services. Only do energy reports if there are some energy
# services
energy_services = bu.missing_energy_services([])
df1 = df.query('site_id==@site_id and service_type==@energy_services')
if not df1.empty:
report_data, df_usage = energy_use_cost_reports(site_id, df, util_obj, df_utility_cost)
template_data.update(report_data)
report_data = electrical_usage_and_cost_reports(site_id, df)
template_data.update(report_data)
#df_utility_cost.to_pickle('df_utility_cost.pkl')
#df_usage.to_pickle('df_usage.pkl')
#import sys; sys.exit()
report_data = heating_usage_cost_reports(site_id, df, util_obj, df_utility_cost, df_usage)
template_data.update(report_data)
report_data = water_report(site_id, df)
template_data.update(report_data)
# save template data variables to debug file if requested
if settings.WRITE_DEBUG_DATA:
with open('output/debug/{}.vars'.format(site_id), 'w') as fout:
pprint.pprint(template_data, fout)
# create report file
result = site_template.render(template_data)
with open('output/sites/{}.html'.format(site_id), 'w') as fout:
fout.write(result)
site_count += 1
if site_count == settings.MAX_NUMBER_SITES_TO_RUN:
break
print()
msg('Benchmarking Script Complete!')
| mit | ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/backend_bases.py | 1 | 94467 |
"""
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import cStringIO
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
_backend_d = {}
def register_backend(format, backend_class):
_backend_d[format] = backend_class
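# Illustrative use (assumed; actual registrations happen in the individual
# backend modules): a backend that renders PNG files might call
#     register_backend('png', FigureCanvasMyBackend)
# so the canvas machinery can later look the class up by output format.
# FigureCanvasMyBackend is a hypothetical name used only for illustration.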
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside "ipython --pylab";
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in --pylab mode.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive():
self.mainloop()
def mainloop(self):
pass
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, path_ids, offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([gc.get_linewidth()], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None])
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
*triangles_array* is a Nx3x2 array of (x, y) points for the triangles.
*colors_array* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, gc, path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles, antialiaseds,
urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
fg = edgecolors[i % Nedgecolors]
if Nfacecolors == 0 and len(fg)==4:
gc0.set_alpha(fg[3])
gc0.set_foreground(fg)
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
if rgbFace[3] == 0:
rgbFace = None
else:
gc0.set_alpha(rgbFace[3])
rgbFace = rgbFace[:3]
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
want to rescale and composite raster images (like SVG).
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
scaling of images (most of the vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
the text string to be drawn
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False, usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath, usetex=False)
path = Path(verts, codes)
angle = np.deg2rad(angle)  # degrees -> radians (avoids the hand-coded pi approximation)
if self.flipy():
transform = Affine2D().scale(fontsize/text2path.FONT_SCALE,
fontsize/text2path.FONT_SCALE).\
rotate(angle).translate(x, self.height-y)
else:
transform = Affine2D().scale(fontsize/text2path.FONT_SCALE,
fontsize/text2path.FONT_SCALE).\
rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
Draw the text by converting it to paths using the textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(x, y, s, prop, angle, ismath)
color = gc.get_rgb()[:3]
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
fontscale = self._text2path.FONT_SCALE
if ismath:
width, height, descent, glyphs, rects = \
self._text2path.mathtext_parser.parse(s, dpi, prop)
return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
font.set_text(s, 0.0, flags=flags) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return True if small y values correspond to the top of the canvas
(i.e. y increases downward). This is used only for drawing text
(:mod:`matplotlib.text`) and images (:mod:`matplotlib.image`)
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
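# Sketch of a typical raster-backend override, following the formula in the
# docstring above (assuming the renderer stores its output resolution as
# self.dpi):
#
#     def points_to_pixels(self, points):
#         return points * self.dpi / 72.0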
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
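# Minimal sketch of a concrete renderer (hypothetical, not a real backend):
# only the four mandatory methods listed in the RendererBase docstring need
# real implementations; everything else can rely on the fallbacks above.
#
#     class RendererNull(RendererBase):
#         def draw_path(self, gc, path, transform, rgbFace=None):
#             pass                                  # discard all drawing
#         def draw_image(self, gc, x, y, im):
#             pass
#         def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
#             pass
#         def get_text_width_height_descent(self, s, prop, ismath):
#             return 1.0, 1.0, 0.0                  # dummy metrics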
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is an even-length list that gives the ink on, ink
off in pixels.
See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._forced_alpha = False
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgb or rgba, set ``isRGB=True`` for
efficiency.
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
if len(self._rgb) == 4 and not self._forced_alpha:
self.set_alpha(self._rgb[3])
# Use set_alpha method here so that subclasses will
# be calling their own version, which may set their
# own attributes.
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). One may specify customized dash styles by providing
a tuple of (offset, dash pairs). For example, the predefined
linestyles have the following values::
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
"""
if style in self.dashd.keys():
offset, dashes = self.dashd[style]
elif isinstance(style, tuple):
offset, dashes = style
else:
raise ValueError('Unrecognized linestyle: %s' % str(style))
self._linestyle = style
self.set_dashes(offset, dashes)
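# Example: besides the named styles in dashd, a custom (offset, on/off-list)
# tuple can be passed, which is forwarded to set_dashes() unchanged:
#
#     gc = GraphicsContextBase()
#     gc.set_linestyle('dashdot')        # predefined: (0, (3.0, 5.0, 1.0, 5.0))
#     gc.set_linestyle((0, (8.0, 2.0)))  # custom: 8 px ink on, 2 px off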
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
class TimerBase(object):
'''
A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
# Initialize an empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self.set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
Runs all functions that have been registered as callbacks. Functions
can return False if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func,args,kwargs in self.callbacks:
ret = func(*args, **kwargs)
if ret == False:
self.callbacks.remove((func,args,kwargs))
if len(self.callbacks) == 0:
self.stop()
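# Sketch of a backend-specific subclass (assumed GUI toolkit API, hypothetical
# names): a backend only has to map the hooks below onto its native timer.
#
#     class TimerXYZ(TimerBase):
#         def _timer_start(self):
#             self._timer = _toolkit.start_timer(self._interval, self._on_timer)
#         def _timer_stop(self):
#             if self._timer is not None:
#                 _toolkit.stop_timer(self._timer)
#                 self._timer = None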
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
In addition to the :class:`Event` attributes, the following event attributes are defined:
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)
axes_list.sort(axCmp)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'mouse_press_event'
using::
canvas.mpl_connect('mouse_press_event',canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
#print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except TypeError:
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
def key_press_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
raise RuntimeError('two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
It is ok to call this even if *ax* doesn't currently hold the mouse grab.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
:meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
if _has_pil:
filetypes['jpg'] = filetypes['jpeg'] = 'Joint Photographic Experts Group'
def print_jpg(self, filename_or_obj, *args, **kwargs):
"""
Supported kwargs:
*quality*: The image quality, on a scale from 1 (worst) to
95 (best). The default is 75. Values above 95 should
be avoided; 100 completely disables the JPEG
quantization stage.
*optimize*: If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
*progressive*: If present, indicates that this image
should be stored as a progressive JPEG file.
"""
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False): return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
options = cbook.restrict_dict(kwargs, ['quality', 'optimize',
'progressive'])
return image.save(filename_or_obj, **options)
print_jpeg = print_jpg
filetypes['tif'] = filetypes['tiff'] = 'Tagged Image File Format'
def print_tif(self, filename_or_obj, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False): return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
return image.save(filename_or_obj)
print_tiff = print_tif
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
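# For example, with the default filetypes dict above the grouped mapping looks
# like {'Scalable Vector Graphics': ['svg', 'svgz'],
#       'Raw RGBA bitmap': ['raw', 'rgba'], ...},
# i.e. one entry per human-readable name with its sorted list of extensions.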
def _get_print_method(self, format):
method_name = 'print_%s' % format
# check for registered backends
if format in _backend_d:
backend_class = _backend_d[format]
def _print_method(*args, **kwargs):
backend = self.switch_backends(backend_class)
print_method = getattr(backend, method_name)
return print_method(*args, **kwargs)
return _print_method
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
return getattr(self, method_name)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure.
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'.
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
print_method = self._get_print_method(format)
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like object, i'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)(
result = print_method(
cStringIO.StringIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bb = [a.get_window_extent(renderer) for a \
in kwargs.pop("bbox_extra_artists", [])]
if bb:
_bbox = Bbox.union([b for b in bb if b.width!=0 or b.height!=0])
bbox_inches1 = TransformedBbox(_bbox,
Affine2D().scale(1./self.figure.dpi))
bbox_inches = Bbox.union([bbox_inches, bbox_inches1])
pad = kwargs.pop("pad_inches", 0.1)
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, format,
bbox_inches)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
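# Typical usage (Figure.savefig delegates to this method); the format is
# inferred from the filename extension unless *format* is given explicitly:
#
#     fig.canvas.print_figure('figure.png', dpi=150,
#                             bbox_inches='tight', pad_inches=0.05)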
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurs is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.TimerBase`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
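# Example usage with a GUI backend canvas (callback signature as described in
# TimerBase.add_callback; *fig* and *update* are assumed to exist):
#
#     def update():
#         fig.canvas.draw_idle()
#     timer = fig.canvas.new_timer(interval=100)   # fire every 100 ms
#     timer.add_callback(update)
#     timer.start()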
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it issues a DeprecationWarning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
class FigureManagerBase:
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode (default key 'f')
if event.key in fullscreen_keys:
self.full_screen_toggle()
# home or reset mnemonic (default key 'h', 'home' and 'r')
elif event.key in home_keys:
self.canvas.toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
self.canvas.toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
self.canvas.toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
self.canvas.toolbar.pan()
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
self.canvas.toolbar.zoom()
# saving current figure (default key 's')
elif event.key in save_keys:
self.canvas.toolbar.save_figure()
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
self.canvas.draw()
# toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
# toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key!='0') or event.key in all:
# keys in list 'all' enables all axes (default key 'a'),
# otherwise if key is a number only enable this particular axes
# if it was the axes, where the event was raised
if not (event.key in all):
n = int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()) :
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()) :
self._xypress.append(( x, y, a, i, a.viewLim.frozen(),
a.transData.frozen() ))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever a mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
# safer to use the button recorded at the press than the current button:
# multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
'the drag callback in zoom mode'
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
# clip x, lastx, y, lasty to the axes bounding box
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
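# --- Hedged example (not part of the original module) -----------------------
# A minimal sketch of a backend toolbar built on NavigationToolbar2, assuming
# a hypothetical canvas that already provides the mpl_connect machinery
# described in the class docstring above. Only the required hooks are filled
# in; a real backend would also create GUI buttons wired to home / back /
# forward / pan / zoom / save_figure.
class _ExampleToolbar(NavigationToolbar2):
    def _init_toolbar(self):
        # A real backend builds its toolbar widgets here; this sketch keeps
        # only a message slot used by set_message().
        self._message = ''
    def save_figure(self, *args):
        # Delegate to the figure's own savefig; the filename is illustrative.
        self.canvas.figure.savefig('figure.png')
    def set_cursor(self, cursor):
        # Map the Cursors enum to a backend-specific cursor here (no-op).
        pass
    def set_message(self, s):
        # A real backend would show this in a status bar; the sketch stores it.
        self._message = s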
|
gpl-2.0
|
jvpoulos/cs289-project
|
code/net_drop_bin_scaled.py
|
1
|
4828
|
# Code adapted from https://github.com/Newmu/Theano-Tutorials
import sys, time
from ntpath import basename
from os.path import splitext
from itertools import product
import cPickle as pickle
import theano
from theano import tensor as T
import numpy as np
from sklearn.cross_validation import KFold
def set_trace():
from IPython.core.debugger import Pdb
import sys
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
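# Plain stochastic gradient descent: every parameter p is updated as
# p <- p - gamma * d(cost)/dp, with no momentum, decay or adaptive step size.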
def sgd(cost, params, gamma):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
updates.append([p, p - g * gamma])
return updates
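# One-hidden-layer MLP: sigmoid activations in the hidden layer, softmax over
# the output classes.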
def model(X, w_h, w_o):
h = T.nnet.sigmoid(T.dot(X, w_h))
pyx = T.nnet.softmax(T.dot(h, w_o))
return pyx
# get filename for saving results
filename = splitext(basename(sys.argv[0]))[0]
print 'Executing {}'.format(filename)
# Load training and test sets
x_train = np.load('../adult-dataset/data_drop_bin_scaled.np')
y_train = np.load('../adult-dataset/labels_drop_bin_onehot.np')
# Network topology
n_inputs = x_train.shape[1]
n_outputs = len(np.unique(y_train))
# Cross-validation and Neural Net parameters
params_dict = pickle.load(open('params_dict.pkl', 'rb'))
n_folds = params_dict['n_folds']
alphas = params_dict['alphas']
gammas = params_dict['gammas']
batch_sizes = params_dict['batch_sizes']
max_epoch = params_dict['max_epoch']
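# The pickled params_dict is expected to provide the grid searched below:
# 'n_folds', 'alphas', 'gammas' (learning rates), 'batch_sizes' and 'max_epoch'.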
# Dictionary to store results
results_dict = {}
params_matrix = np.array([x for x in product(alphas, gammas, batch_sizes)])
params_matrix = np.column_stack((params_matrix,
np.zeros(params_matrix.shape[0]),
np.zeros(params_matrix.shape[0]),
np.zeros(params_matrix.shape[0])))
for param_idx in xrange(params_matrix.shape[0]):
alpha = params_matrix[param_idx, 0]
gamma = params_matrix[param_idx, 1]
batch_size = int(params_matrix[param_idx, 2])
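# Heuristic: size the hidden layer so the parameter count scales with the
# number of training samples per fold, shrunk by the factor alpha.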
n_hidden = int((x_train.shape[0] / n_folds)/(alpha*(n_inputs+n_outputs)))
# Initialize weights
w_h = init_weights((n_inputs, n_hidden))
w_o = init_weights((n_hidden, n_outputs))
# Initialize NN classifier
X = T.fmatrix()
Y = T.fmatrix()
py_x = model(X, w_h, w_o)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
params = [w_h, w_o]
updates = sgd(cost, params, gamma=gamma)
train = theano.function(inputs=[X, Y],
outputs=cost,
updates=updates,
allow_input_downcast=True)
predict = theano.function(inputs=[X],
outputs=y_x,
allow_input_downcast=True)
# Test on validation set
model_str = 'alpha {} gamma {} batch size {}'.format(alpha,
gamma,
batch_size)
print model_str
kf = KFold(x_train.shape[0], n_folds=n_folds)
error_rates = []
test_costs = []
running_time = []
fold = 1
for train_idx, val_idx in kf:
start_time = time.time()
for i in range(max_epoch):
for start, end in zip(range(0, len(x_train[train_idx]),
batch_size),
range(batch_size, len(x_train[train_idx]),
batch_size)):
test_cost = train(x_train[train_idx][start:end],
y_train[train_idx][start:end])
error_rate = 1 - np.mean(np.argmax(y_train[val_idx], axis=1) ==
predict(x_train[val_idx]))
print 'fold {}, epoch {}, error rate {}, cost {}'.format(fold, i+1,
error_rate,
test_cost)
error_rates.append(error_rate)
test_costs.append(test_cost)
running_time.append(np.around((time.time() - start_time) / 60., 1))
fold += 1
params_matrix[param_idx, 3] = np.mean(error_rates)
params_matrix[param_idx, 4] = np.mean(test_costs)
params_matrix[param_idx, 5] = np.mean(running_time)
print 'alpha {} gamma {} batchsize {} error rate {} test cost {} running time {}'.format(params_matrix[param_idx,0],
params_matrix[param_idx,1],
params_matrix[param_idx,2],
params_matrix[param_idx,3],
params_matrix[param_idx,4],
params_matrix[param_idx,5])
# Save params matrix to disk
params_matrix.dump('{}_results.np'.format(filename))
|
mit
|
5aurabhpathak/masters-thesis
|
test/visualise_pbresults.py
|
2
|
3521
|
#!/bin/env python3
#Author: Saurabh Pathak
'''graph visualizer for PBSMT experiment results'''
from matplotlib import animation as an, pyplot as pl, rcParams
from mpl_toolkits.mplot3d import Axes3D
from numpy import loadtxt, arange, argmax, array
import sys, getopt
def graph_col1vscol2(x, y, z):
fig = pl.figure('{} vs {}'.format(x,y), figsize=(13.66, 7.68))
i = 1
while data[z][i] == data[z][0]: i += 1
j = k = 0
for t in range(i, len(data[x])+1, i):
color = 'C{}'.format(k)
if z == 'Distortion Limit' and data[z][j] == 13: l = 'unlimited'
else: l = data[z][j]
if z == 'Stack Size':
pl.scatter(data[x][j], data[y][j], c=color, s=20)
pl.scatter(data[x][t-1], data[y][t-1], c=color, s=20)
pl.plot(data[x][j+1:t-1], data[y][j+1:t-1], '-', c=color, label=l, marker='o', markersize=5)
else: pl.plot(data[x][j:t], data[y][j:t], '-', c=color, label=l, marker='o', markersize=5)
j = t
k += 1
pl.xlabel(x)
pl.ylabel(y)
if y == 'BLEU':
pl.yticks(arange(int(min(data[y])), max(data[y])+.2, .4))
i = argmax(data[y])
else:
pl.yticks(arange(0, max(data[y])+1, 10))
if x == 'Distortion Limit': i = argmax([(data[y][j] if data[x][j] != 13 else 0) for j in range(len(data))])
else: i = argmax(data[y])
pl.legend(bbox_to_anchor=(1, 1), title=z)
if x == 'Distortion Limit':
ticks = [str(x) for x in range(13)]+['\u221E']
pl.xticks(arange(0,14,1))
pl.gca().set_xticklabels(ticks)
else: pl.xticks([100,500,1000,1500,2000])
pl.title('Max {}: {}\nat {}: {:.0f}'.format(y, data[y][i], x, data[x][i]))
fig.set_size_inches(5, 5)
pl.savefig('{}_vs_{}.png'.format(x, y), format='png', bbox_inches='tight')
if draw: pl.show()
else: pl.close()
def graph(x, y, z):
'''creates animated plot in 3D'''
fig = pl.figure('3D {} Plot'.format(z))
ax = pl.subplot(projection='3d')
ax.scatter(data[x], data[y], data[z], cmap='coolwarm', s=10, depthshade=False)
d = array([i for i in data if i[1] not in {0,13}], dtype=typeinfo)
fig.colorbar(ax.plot_surface(d[x].reshape(7,5), d[y].reshape(7,5), d[z].reshape(7,5), cmap='coolwarm', alpha=0.9))
ax.set_xlabel(x)
ax.set_ylabel(y)
ax.set_zlabel(z)
anim = an.FuncAnimation(fig, lambda i: ax.view_init(elev=30, azim=i), frames=360, interval=100)
anim.save('{}anim.gif'.format(z), writer='imagemagick', fps=15)
if draw: pl.show()
else: pl.close()
def usage():
print('Usage: visualise_pbresults.py --input pbbleu_file [--show-plots]')
exit(1)
if __name__=='__main__':
opts, args = getopt.getopt(sys.argv[1:], None, ['show-plots', 'input='])
if len(args) > 0 or not 1 <= len(opts) <= 2:
print('Invalid arguments or too few parameters.')
usage()
draw = False
for o, a in opts:
if o == '--show-plots': draw = True
elif o == '--input': ip = a
else:
print('Invalid parameters.')
usage()
s, d, b, r = 'Stack Size', 'Distortion Limit', 'BLEU', 'Runtime (minutes)'
typeinfo = [(s,'int'), (d, 'int'), (b, 'float'), (r, 'float')]
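# A distortion limit of -1 in the input means "unlimited"; it is remapped to
# 13 so it sorts after the numeric limits and is drawn with the infinity tick.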
data=loadtxt(ip, dtype=typeinfo, converters={1: lambda x: int(x) if int(x) != -1 else 13})
rcParams.update({'font.size': 14})
graph_col1vscol2(d,b,s)
graph_col1vscol2(d,r,s)
data.sort(order=d, kind='mergesort')
graph_col1vscol2(s,b,d)
graph_col1vscol2(s,r,d)
#graph(d,s,b)
#graph(d,s,r)
|
gpl-3.0
|
jungmannlab/picasso
|
picasso/gui/average3.py
|
1
|
73230
|
"""
gui/average3
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: Maximilian Strauss, 2017-2018
:copyright: Copyright (c) 2017-2018 Jungmann Lab, MPI of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt5 import QtCore, QtGui, QtWidgets
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
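# The two numba helpers below bin localizations into a 2D histogram image at
# the requested oversampling (render pixels per camera pixel), discarding
# points that fall outside the given field of view.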
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
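# Rotate the coordinates (vx, vy, vz) by `angle` around the given axis.
# x and y are in camera pixels while z is in nm, so z is converted via
# `pixelsize` whenever it mixes with x or y in the rotation.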
def rotate_axis(axis, vx, vy, vz, angle, pixelsize):
if axis == "z":
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == "y":
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == "x":
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
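# Cross-correlation via the Fourier correlation theorem:
# fftshift(real(ifft2(fft2(image) * CF_image_avg))), where CF_image_avg is
# the conjugated FFT of the reference image (see rotate_groups below).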
def compute_xcorr(CF_image_avg, image):
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
class ParametersDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Parameters")
self.setModal(False)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("Oversampling:"), 0, 0)
self.oversampling = QtWidgets.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtWidgets.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtWidgets.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(
pixmap.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.FastTransformation,
)
)
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(
self.locs, oversampling, t_min, t_min, t_max, t_max
)
self.set_image(image_avg)
class DatasetDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Datasets")
self.setModal(False)
self.layout = QtWidgets.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self, path):
c = QtWidgets.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Average3")
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "average.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
open_action = file_menu.addAction("Open")
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction("Save")
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu("Process")
parameters_action = process_menu.addAction("Parameters")
parameters_action.setShortcut("Ctrl+P")
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction("Datasets")
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = "Zoom"
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtWidgets.QLabel("")
self.viewxz = QtWidgets.QLabel("")
self.viewyz = QtWidgets.QLabel("")
self.viewcp = QtWidgets.QLabel("")
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtWidgets.QGroupBox("Display")
displaygrid = QtWidgets.QGridLayout(display_groupbox)
displaygrid.addWidget(QtWidgets.QLabel("XY"), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtWidgets.QLabel("XZ"), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtWidgets.QLabel("YZ"), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtWidgets.QLabel("CP"), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtWidgets.QGroupBox("Buttons")
buttongrid = QtWidgets.QGridLayout(button_groupbox)
rotation_groupbox = QtWidgets.QGroupBox("Rotation + Translation")
rotationgrid = QtWidgets.QGridLayout(rotation_groupbox)
centerofmassbtn = QtWidgets.QPushButton("Center of Mass XYZ")
axis_groupbox = QtWidgets.QGroupBox("Axis")
axisgrid = QtWidgets.QGridLayout(axis_groupbox)
self.x_axisbtn = QtWidgets.QRadioButton("X")
self.y_axisbtn = QtWidgets.QRadioButton("Y")
self.z_axisbtn = QtWidgets.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtWidgets.QGroupBox("Projection")
projgrid = QtWidgets.QGridLayout(proj_groupbox)
self.xy_projbtn = QtWidgets.QRadioButton("XY")
self.yz_projbtn = QtWidgets.QRadioButton("YZ")
self.xz_projbtn = QtWidgets.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtWidgets.QPushButton("Rotate")
self.radio_sym = QtWidgets.QRadioButton("x symmetry")
self.symEdit = QtWidgets.QSpinBox()
self.symEdit.setRange(2, 100)
self.symEdit.setValue(8)
self.radio_sym_custom = QtWidgets.QRadioButton("custom symmetry")
self.symcustomEdit = QtWidgets.QLineEdit("90,180,270")
deg_groupbox = QtWidgets.QGroupBox("Degrees")
deggrid = QtWidgets.QGridLayout(deg_groupbox)
self.full_degbtn = QtWidgets.QRadioButton("Full")
self.part_degbtn = QtWidgets.QRadioButton("Part")
self.degEdit = QtWidgets.QTextEdit()
self.degEdit = QtWidgets.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtWidgets.QCheckBox("Translate only")
self.flipbtn = QtWidgets.QCheckBox("Consider flipped structures")
self.alignxbtn = QtWidgets.QPushButton("Align X")
self.alignybtn = QtWidgets.QPushButton("Align Y")
self.alignzzbtn = QtWidgets.QPushButton("Align Z_Z")
self.alignzybtn = QtWidgets.QPushButton("Align Z_Y")
self.translatexbtn = QtWidgets.QPushButton("Translate X")
self.translateybtn = QtWidgets.QPushButton("Translate Y")
self.translatezbtn = QtWidgets.QPushButton("Translate Z")
self.rotatexy_convbtn = QtWidgets.QPushButton("Rotate XY - Convolution")
self.scorebtn = QtWidgets.QPushButton("Calculate Score")
operate_groupbox = QtWidgets.QGroupBox("Operate")
operategrid = QtWidgets.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("x-Range (Px)"), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("y-Range (Px)"), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtWidgets.QLineEdit("-1000,1000")
rotationgrid.addWidget(QtWidgets.QLabel("z-Range (nm)"), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn, 4, 0)
operategrid.addWidget(self.scorebtn, 4, 1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtWidgets.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtWidgets.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtWidgets.QGroupBox("Contrast")
contrastgrid = QtWidgets.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
MODEL_Y_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
MODEL_Z_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
self.modelchk = QtWidgets.QCheckBox("Use Model")
self.model_x = QtWidgets.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtWidgets.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtWidgets.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtWidgets.QPushButton("Preview")
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtWidgets.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtWidgets.QGroupBox("Model")
modelgrid = QtWidgets.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk, 0, 0)
modelgrid.addWidget(QtWidgets.QLabel("X-Coordinates"), 1, 0)
modelgrid.addWidget(self.model_x, 1, 1)
modelgrid.addWidget(QtWidgets.QLabel("Y-Coordinates"), 2, 0)
modelgrid.addWidget(self.model_y, 2, 1)
modelgrid.addWidget(QtWidgets.QLabel("Z-Coordinates"), 3, 0)
modelgrid.addWidget(self.model_z, 3, 1)
modelgrid.addWidget(QtWidgets.QLabel("Blur:"), 4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtWidgets.QLabel("Pixelsize:"), 5, 0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6, 0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage("Average3 ready.")
def open(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]["Width"] / 2
cy = self.infos[i][0]["Height"] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{"Generated by": "Picasso Average3"}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, "z")
out_path = os.path.splitext(self.locs_paths[i])[0] + "_avg3.hdf5"
path, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save localizations", out_path, filter="*.hdf5"
)
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
print("Opening {} ..".format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, "group"):
msgBox = QtWidgets.QMessageBox(self)
msgBox.setWindowTitle("Error")
msgBox.setText(
("Datafile does not contain group information."
" Please load file with picked localizations.")
)
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, "z"):
locs = lib.append_to_rec(locs, locs.x.copy(), "z")
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize, ok = QtWidgets.QInputDialog.getInt(
self,
"Pixelsize Dialog",
"Please enter the pixelsize in nm",
130,
)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(
self.updateLayout
)
cx = self.infos[-1][0]["Width"] / 2
cy = self.infos[-1][0]["Height"] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean(
[np.median(locs.lpx), np.median(locs.lpy)]
)
if hasattr(locs, "group"):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
if rendermode:
self.fit_in_view(autoscale=True)
else:
if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x), np.min(locs.y)])
self.t_max = np.max([np.max(locs.x), np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min(
[np.min(locs.x), np.min(locs.y), self.t_min]
)
self.t_max = np.max(
[np.max(locs.x), np.max(locs.y), self.t_max]
)
self.z_min = np.min([np.min(locs.z), self.z_min])
self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print("Dataset loaded from {}.".format(path))
else:
print(
("Dataset loaded from {},"
" Total number of datasets {}.").format(
path, len(self.locs)
)
)
# CREATE GROUP INDEX
if hasattr(locs, "group"):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix(
(n_groups, n_locs), dtype=np.bool
)
progress = lib.ProgressDialog(
"Creating group index", 0, len(groups), self
)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i + 1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
# Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
# stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
# CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max(
[
3
* np.sqrt(
np.mean(self.locs[j].x ** 2 + self.locs[j].y ** 2)
),
self.r,
]
)
self.r_z = np.max(
[5 * np.sqrt(np.mean(self.locs[j].z ** 2)), self.r_z]
)
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print("Aligning by center of mass.. ", end="", flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog(
"Aligning by center of mass", 0, n_groups, self
)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i + 1)
out_locs_x = stack_arrays(
out_locs_x, asrecarray=True, usemask=False
)
out_locs_y = stack_arrays(
out_locs_y, asrecarray=True, usemask=False
)
out_locs_z = stack_arrays(
out_locs_z, asrecarray=True, usemask=False
)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print("Complete.")
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(
render.render_hist3d(
locs[i],
oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
)
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images, colors, 2)
pixmap2 = self.pixmap_from_colors(images, colors, 0)
pixmap3 = self.pixmap_from_colors(images, colors, 1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self, images, colors, axisval):
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print("Align X")
self.align_all("x")
def align_y(self):
print("Align Y")
self.align_all("y")
def align_zz(self):
print("Align Z")
self.align_all("zz")
def align_zy(self):
print("Align Z")
self.align_all("zy")
def translate_x(self):
print("Translate X")
self.translate("x")
def translate_y(self):
print("Translate Y")
self.translate("y")
def translate_z(self):
print("Translate Z")
self.translate("z")
def translate(self, translateaxis):
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
if translateaxis == "x":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == "y":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == "z":
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print("Translating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage("Done!")
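    # For a single pick group: render its projection along the chosen axis,
    # 1D-cross-correlate that profile with the summed reference profile of
    # all groups, and shift the group by the best lag (converted from render
    # pixels back to camera pixels via the current oversampling).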
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == "x":
proplane = "xy"
elif translateaxis == "y":
proplane = "xy"
elif translateaxis == "z":
proplane = "xz"
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
plane = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) #
if translateaxis == "x":
projection = np.sum(plane, axis=0)
elif translateaxis == "y":
projection = np.sum(plane, axis=1)
elif translateaxis == "z":
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
# print('Step X')
# ax3 = fig.add_subplot(1,3,3)
# plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j], projection))
shiftval = (
np.argmax(signal.correlate(signalimg[j], projection))
- len(signalimg[j])
+ 1
)
all_xcorr[0, j] = corrval
all_da[0, j] = shiftval / self.oversampling
if plotmode:
plt.show()
# value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
dafinal = np.mean(all_da[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == "x":
self.locs[j].x[index] += dafinal
elif translateaxis == "y":
self.locs[j].y[index] += dafinal
elif translateaxis == "z":
self.locs[j].z[index] += dafinal * self.pixelsize
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print("Z min {}, Z max {}".format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print("X min {}, X max {}".format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print("Y min {}, Y max {}".format(self.y_min, self.y_max))
self.updateLayout()
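    # For a single pick group: try every candidate angle, re-render the
    # rotated group in the chosen projection plane, and score each angle by
    # the real-space overlap sum(reference * image); the angle with the
    # highest score summed over channels is then applied to the group.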
def rotatexy_convolution_group(
self, CF_image_avg, angles, group, rotaxis, proplane
):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k, j] = xcorr
# value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
# TODO: re-write this with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
# Change CF image for symmetry
if self.radio_sym.isChecked():
print("Using symmetry.")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
print(symmetry_txt)
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = image
print("Convolving..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.rotatexy_convolution_group(
CF_image_avg, angles, i, rotaxis, proplane
)
self.updateLayout()
self.status_bar.showMessage("Done!")
def rotate_groups(self):
# Read out values from radiobuttons
# TODO: maybe re-write this with kwargs
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
if self.radio_sym.isChecked():
print("Radio sym")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
# TODO: Sort these functions out,
# combine with radio_sym / also for convolving.
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = [np.conj(np.fft.fft2(_)) for _ in image]
# n_pixel, _ = image_avg.shape
# image_half = n_pixel / 2
print("Rotating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.align_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage("Done!")
def getUIstate(self):
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
elif self.yz_projbtn.isChecked():
proplane = "yz"
elif self.xz_projbtn.isChecked():
proplane = "xz"
return rotaxis, proplane
def projectPlanes(self, images, proplane):
if proplane == "xy":
image = [np.sum(_, axis=2) for _ in images]
elif proplane == "yz":
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif proplane == "xz":
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
return image
def generate_template(self):
model_x_str = np.asarray((self.model_x.text()).split(","))
model_y_str = np.asarray((self.model_y.text()).split(","))
model_z_str = np.asarray((self.model_z.text()).split(","))
model_x = []
model_y = []
model_z = []
for element in model_x_str:
try:
model_x.append(float(element))
except ValueError:
pass
for element in model_y_str:
try:
model_y.append(float(element))
except ValueError:
pass
for element in model_z_str:
try:
model_z.append(float(element))
except ValueError:
pass
pixelsize = self.pixelsizeEdit.value()
blur = self.modelblurEdit.value()
# Center of mass
model_x = np.array(model_x) / pixelsize
model_y = np.array(model_y) / pixelsize
model_z = np.array(model_z)
model_x = model_x - np.mean(model_x)
model_y = model_y - np.mean(model_y)
model_z = model_z - np.mean(model_z)
rotaxis, proplane = self.getUIstate()
template_img = self.render_planes(
model_x, model_y, model_z, proplane, pixelsize
)
self.template_img = scipy.ndimage.filters.gaussian_filter(
template_img, blur
)
def model_preview(self):
self.generate_template()
# Generate a template image
fig = plt.figure()
plt.title("Preview of Template")
plt.imshow(self.template_img, interpolation="nearest", cmap=plt.cm.hot)
plt.show()
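    # Score each group by its normalized overlap with the average image,
    # sum(sqrt(group * average)) / sum(sqrt(group * group)) per channel, and
    # report the mean over all groups.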
def calculate_score(self):
# Dummy button -> Functionality of rotatebtn for now
# TODO: maybe re-write this with kwargs
self.scores = []
rotaxis, proplane = self.getUIstate()
n_groups = self.group_index[0].shape[0]
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
n_locs = sum([_[0] for _ in renderings])
# Make an average and not a sum image here..
images = np.array([_[1] / n_groups for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
image = self.projectPlanes(images, proplane)
n_channels = len(image)
print("Calculating score..")
for i in tqdm(range(n_groups)):
channel_score = []
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][i].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
groupimage = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
score = np.sum(np.sqrt(groupimage * image[j])) / np.sum(
np.sqrt(groupimage * groupimage)
)
channel_score.append(score)
self.scores.append(channel_score)
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.status_bar.showMessage(
"Done. Average score: {}".format(np.mean(self.scores))
)
plt.hist(np.array(self.scores), 40)
plt.title(
"Histogram of Scores, Mean: {:.2f}".format(np.mean(self.scores))
)
plt.xlabel("Score")
plt.ylabel("Counts")
plt.show()
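# mean_angle computes the circular mean of the supplied angles: each value d
# is mapped onto a unit phasor rect(1, d) (rect/phase are presumably imported
# from cmath earlier in this module, and the values are treated as radians
# despite the parameter name `deg`), the phasors are averaged and the phase
# of the result is returned. For example, the circular mean of 0.1 and
# 2*pi - 0.1 comes out near 0 rather than near pi.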
def mean_angle(self, deg):
return phase(sum(rect(1, d) for d in deg) / len(deg))
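# render_planes picks the (a, b) coordinate pair and value ranges matching
# the requested projection plane (z values are rescaled into pixel units via
# pixelsize for the yz/xz planes) and delegates to render_histxyz, returning
# only the 2D histogram image.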
def render_planes(self, xdata, ydata, zdata, proplane, pixelsize):
# assign correct renderings for all planes
a_render = []
b_render = []
if proplane == "xy":
a_render = xdata
b_render = ydata
aval_min = self.t_min
aval_max = self.t_max
bval_min = self.t_min
bval_max = self.t_max
elif proplane == "yz":
a_render = ydata
b_render = np.divide(zdata, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
elif proplane == "xz":
b_render = np.divide(zdata, pixelsize)
a_render = xdata
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
N, plane = render_histxyz(
a_render,
b_render,
self.oversampling,
aval_min,
aval_max,
bval_min,
bval_max,
)
return plane
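# align_all is a brute-force search over rotation angles about a fixed axis:
# for each checked channel the raw localizations are rotated, the chosen
# projection is rendered and scored against a 3-pixel-wide stripe template
# through the image centre (weights 1-2-1, horizontal or vertical depending
# on the target axis). The angle with the largest summed score across
# channels is then applied to every channel. Note that the stripe weights
# are re-added on every angle iteration, so later angles are correlated
# against a progressively heavier template.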
def align_all(self, alignaxis):
a_step = np.arcsin(1 / (self.oversampling * self.r))
angles = np.arange(0, 2 * np.pi, a_step)
n_channels = len(self.locs)
n_angles = len(angles)
all_corr = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
alignimage = []
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
alignimage = []
for k in range(n_angles):
angle = angles[k]
if alignaxis == "zz":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "zy":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "y":
proplane = "xy"
rotaxis = "z"
elif alignaxis == "x":
proplane = "xy"
rotaxis = "z"
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)  # NOTE: render_planes was buggy at some point
if alignimage == []:
alignimage = np.zeros(image.shape)
# CREATE ALIGNIMAGE
if alignaxis == "zz":
alignimage[np.int(alignimage.shape[0] / 2), :] += 2
alignimage[
np.int(alignimage.shape[0] / 2) + 1, :
] += 1
alignimage[
np.int(alignimage.shape[0] / 2) - 1, :
] += 1
elif alignaxis == "zy":
alignimage[:, np.int(alignimage.shape[0] / 2)] += 2
alignimage[
:, np.int(alignimage.shape[0] / 2) + 1
] += 1
alignimage[
:, np.int(alignimage.shape[0] / 2) - 1
] += 1
elif alignaxis == "y":
alignimage[:, np.int(alignimage.shape[1] / 2)] += 2
alignimage[
:, np.int(alignimage.shape[1] / 2) - 1
] += 1
alignimage[
:, np.int(alignimage.shape[1] / 2) + 1
] += 1
elif alignaxis == "x":
alignimage[np.int(alignimage.shape[0] / 2), :] += 2
alignimage[
np.int(alignimage.shape[0] / 2) + 1, :
] += 1
alignimage[
np.int(alignimage.shape[0] / 2) - 1, :
] += 1
all_corr[k, j] = np.sum(np.multiply(alignimage, image))
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(
alignimage,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
# pick the angle with the largest summed cross-correlation value from the table
maximumcc = np.argmax(np.sum(all_corr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x = x_rot
self.locs[j].y = y_rot
self.locs[j].z = z_rot
self.updateLayout()
self.status_bar.showMessage(
"Align on Axis {} complete.".format(alignaxis)
)
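# align_group aligns a single pick group against the conjugated FFTs of the
# average images (CF_image_avg): for every candidate angle (and, when the
# flip checkbox is set, a second pass with one coordinate mirrored according
# to the projection plane) it renders the group, cross-correlates it with
# the average via compute_xcorr, and records the peak position. The
# rotation, translation and flip with the highest correlation are then
# written back into self.locs for that group; with the translate-only
# button checked the angle list collapses to [0].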
def align_group(self, CF_image_avg, angles, group, rotaxis, proplane):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
flips = 1
if self.flipbtn.isChecked():
print("Considering flipped structures...")
flips = 2
for f in range(flips):
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if f == 1: # Flipped round
if proplane == "xy":
x_original = -x_original
elif proplane == "yz":
y_original = -y_original
elif proplane == "xz":
z_original = -z_original
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)  # NOTE: render_planes was buggy at some point
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = compute_xcorr(CF_image_avg[j], image)
n_pixelb, n_pixela = image.shape
image_halfa = n_pixela / 2 # TODO: CHECK THOSE VALUES
image_halfb = n_pixelb / 2
# find the brightest pixel
b_max, a_max = np.unravel_index(
xcorr.argmax(), xcorr.shape
)
# store the transformation if the correlation
# is larger than before
all_xcorr[k, j] = xcorr[b_max, a_max]
all_db[k, j] = (
np.ceil(b_max - image_halfb) / self.oversampling
)
all_da[k, j] = (
np.ceil(a_max - image_halfa) / self.oversampling
)
flipstate = False
if f == 0:
# pick the angle with the largest summed cross-correlation value from the table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
maximumcc_val = np.max(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc, :])
dbfinal = np.mean(all_db[maximumcc, :])
else:
maximumcc_val_f = np.max(np.sum(all_xcorr, axis=1))
if maximumcc_val < maximumcc_val_f:
flipstate = True
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc, :])
dbfinal = np.mean(all_db[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
if flipstate:
if proplane == "xy":
self.locs[j].x[index] = -x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
elif proplane == "yz":
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = -y_rot
self.locs[j].z[index] = z_rot
elif proplane == "xz":
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = -z_rot
else:
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
# Shift image group locs
if self.translatebtn.isChecked():
dbfinal = 0
if proplane == "xy":
self.locs[j].x[index] -= dafinal
self.locs[j].y[index] -= dbfinal
elif proplane == "yz":
self.locs[j].y[index] -= dafinal
self.locs[j].z[index] -= dbfinal * self.pixelsize
elif proplane == "xz":
self.locs[j].z[index] -= dafinal
self.locs[j].x[index] -= dbfinal * self.pixelsize
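# fit_in_view resets the viewport to the full movie extent, taken from the
# per-file metadata ("Height"/"Width" in self.infos via max_movie_height and
# max_movie_width), and triggers a redraw through update_scene.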
def fit_in_view(self, autoscale=False):
movie_height, movie_width = self.movie_size()
viewport = [(0, 0), (movie_height, movie_width)]
self.update_scene(viewport=viewport, autoscale=autoscale)
def movie_size(self):
movie_height = self.max_movie_height()
movie_width = self.max_movie_width()
return (movie_height, movie_width)
def max_movie_height(self):
""" Returns maximum height of all loaded images. """
return max(info[0]["Height"] for info in self.infos)
def max_movie_width(self):
return max([info[0]["Width"] for info in self.infos])
def update_scene(
self, viewport=None, autoscale=False, use_cache=False, picks_only=False
):
n_channels = len(self.locs)
if n_channels:
viewport = viewport or self.viewport
self.draw_scene(
viewport,
autoscale=autoscale,
use_cache=use_cache,
picks_only=picks_only,
)
# self.update_cursor()
def draw_scene(
self, viewport, autoscale=False, use_cache=False, picks_only=False
):
self.viewport = self.adjust_viewport_to_view(viewport)
qimage = self.render_scene(autoscale=autoscale, use_cache=use_cache)
self.qimage = qimage.scaled(
self.viewxy.width(),
self.viewxy.height(),
QtCore.Qt.KeepAspectRatioByExpanding,
)
def adjust_viewport_to_view(self, viewport):
"""
Adds space to a desired viewport so that
it matches the window aspect ratio.
"""
viewport_height = viewport[1][0] - viewport[0][0]
viewport_width = viewport[1][1] - viewport[0][1]
view_height = self.height()
view_width = self.width()
viewport_aspect = viewport_width / viewport_height
view_aspect = view_width / view_height
if view_aspect >= viewport_aspect:
y_min = viewport[0][0]
y_max = viewport[1][0]
x_range = viewport_height * view_aspect
x_margin = (x_range - viewport_width) / 2
x_min = viewport[0][1] - x_margin
x_max = viewport[1][1] + x_margin
else:
x_min = viewport[0][1]
x_max = viewport[1][1]
y_range = viewport_width / view_aspect
y_margin = (y_range - viewport_height) / 2
y_min = viewport[0][0] - y_margin
y_max = viewport[1][0] + y_margin
return [(y_min, x_min), (y_max, x_max)]
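# render_scene renders either the single- or the multi-channel image into
# the BGRA buffer self._bgra, forces the alpha channel to 255 and wraps the
# buffer in a QImage (Format_RGB32) for display in the view widget.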
def render_scene(
self, autoscale=False, use_cache=False, cache=True, viewport=None
):
kwargs = self.get_render_kwargs(viewport=viewport)
n_channels = len(self.locs)
if n_channels == 1:
self.render_single_channel(
kwargs, autoscale=autoscale, use_cache=use_cache, cache=cache
)
else:
self.render_multi_channel(
kwargs, autoscale=autoscale, use_cache=use_cache, cache=cache
)
self._bgra[:, :, 3].fill(255)
Y, X = self._bgra.shape[:2]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
return qimage
def get_render_kwargs(
self, viewport=None
): # Dummy for now: TODO: Implement
viewport = [(0, 0), (32, 32)]
return {
"oversampling": 5,
"viewport": viewport,
"blur_method": None,
"min_blur_width": float(0),
}
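# render_multi_channel assigns each channel an evenly spaced HSV hue,
# renders (or reuses the cached) per-channel images, scales the contrast and
# accumulates the colourised channels into a float BGRA buffer, which is
# clipped to 1 and converted to 8 bit.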
def render_multi_channel(
self, kwargs, autoscale=False, locs=None, use_cache=False, cache=True
):
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
if use_cache:
n_locs = self.n_locs
image = self.image
else:
renderings = [render.render(_, **kwargs) for _ in locs]
n_locs = sum([_[0] for _ in renderings])
image = np.array([_[1] for _ in renderings])
if cache:
self.n_locs = n_locs
self.image = image
image = self.scale_contrast(image)
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
return self._bgra
def render_single_channel(
self, kwargs, autoscale=False, use_cache=False, cache=True
):
locs = self.locs[0]
if hasattr(locs, "group"):
locs = [locs[self.group_color == _] for _ in range(N_GROUP_COLORS)]
return self.render_multi_channel(
kwargs, autoscale=autoscale, locs=locs, use_cache=use_cache
)
if use_cache:
n_locs = self.n_locs
image = self.image
else:
n_locs, image = render.render(locs, **kwargs)
if cache:
self.n_locs = n_locs
self.image = image
image = self.scale_contrast(image, autoscale=autoscale)
image = self.to_8bit(image)
Y, X = image.shape
# cmap = self.window.display_settings_dialog.colormap.currentText()
# TODO: selection of colormap?
cmap = "hot"
cmap = np.uint8(np.round(255 * plt.get_cmap(cmap)(np.arange(256))))
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
return self._bgra
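# scale_contrast normalises the image by contrastEdit.value() times the
# maximum intensity (for multi-channel stacks the smallest per-channel
# maximum is used), zeroes non-finite pixels and clamps the result to
# [0, 1]; to_8bit then rounds that range into uint8.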
def to_8bit(self, image):
return np.round(255 * image).astype("uint8")
def scale_contrast(self, image, autoscale=False):
if image.ndim == 2:
max_ = image.max()
else:
max_ = min([_.max() for _ in image])
upper = self.contrastEdit.value() * max_
lower = 0
if upper > 0:
image = (image - lower) / (upper - lower)
image[~np.isfinite(image)] = 0
image = np.minimum(image, 1.0)
image = np.maximum(image, 0.0)
return image
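# main creates the Qt application and the main window and installs a custom
# excepthook that cancels open dialogs, shows the formatted traceback in a
# critical message box and then defers to the default handler.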
def main():
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
def excepthook(type, value, tback):
lib.cancel_dialogs()
message = "".join(traceback.format_exception(type, value, tback))
errorbox = QtWidgets.QMessageBox.critical(
window, "An error occured", message
)
errorbox.exec_()
sys.__excepthook__(type, value, tback)
sys.excepthook = excepthook
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
mit
|
masfaraud/volmdlr
|
scripts/mesh/mesher.py
|
1
|
190637
|
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.tri as tri
import numpy as npy
import volmdlr as vm
import volmdlr.mesh as vmmesh
import finite_elements.elasticity as els
import finite_elements.core as corefe
import math
from scipy import sparse
from scipy import linalg
from finite_elements.core import steel, aluminium
import time
from dessia_common import DessiaObject
from typing import TypeVar, List, Tuple
from scipy.spatial import Delaunay
from itertools import product
from itertools import combinations
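# The large dictionaries below appear to encode serialised volmdlr Contour2D
# objects (reconstructed via dict_to_object): the rotor internal and external
# contours, the rotor magnet contour (later duplicated around the poles) and
# the stator contours used as input geometry for this mesher script.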
rotor_internal_contour = vm.Contour2D.dict_to_object({'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Contour2D', 'primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Circle2D', 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.03, 'angle': 6.283185307179586, 'utd_geo_points': False, 'points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.029630650217854132, 0.004693033951206926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, 0.009270509831248422]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651038, 0.013619714992186402]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.024270509831248423, 0.017633557568774192]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021213203435596427, 0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774192, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186404, 0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248424, 0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0046930339512069276, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.8369701987210296e-18, 0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206924, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.00927050983124842, 0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0136197149921864, 0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01763355756877419, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021213203435596423, 0.021213203435596427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02427050983124842, 0.017633557568774196]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651034, 0.013619714992186406]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.028531695488854605, 0.009270509831248426]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02963065021785413, 0.004693033951206929]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03, 3.673940397442059e-18]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.029630650217854132, -0.0046930339512069215]}, {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02853169548885461, -0.009270509831248419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651038, -0.0136197149921864]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.024270509831248427, -0.01763355756877419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02121320343559643, -0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017633557568774196, -0.02427050983124842]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.013619714992186407, -0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009270509831248426, -0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206931, -0.02963065021785413]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-5.510910596163089e-18, -0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.00469303395120692, -0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248417, -0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186399, -0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774185, -0.024270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02121320343559642, -0.02121320343559643]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02427050983124842, -0.0176335575687742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651034, -0.01361971499218641]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, -0.009270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02963065021785413, -0.004693033951206933]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.029630650217854132, 0.004693033951206926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, 0.009270509831248422]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651038, 0.013619714992186402]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.024270509831248423, 0.017633557568774192]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021213203435596427, 0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[0.017633557568774192, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186404, 0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248424, 0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0046930339512069276, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.8369701987210296e-18, 0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206924, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.00927050983124842, 0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0136197149921864, 0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01763355756877419, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021213203435596423, 0.021213203435596427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02427050983124842, 0.017633557568774196]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651034, 0.013619714992186406]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.028531695488854605, 0.009270509831248426]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02963065021785413, 0.004693033951206929]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03, 3.673940397442059e-18]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.029630650217854132, -0.0046930339512069215]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02853169548885461, -0.009270509831248419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651038, -0.0136197149921864]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.024270509831248427, -0.01763355756877419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02121320343559643, -0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017633557568774196, -0.02427050983124842]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.013619714992186407, -0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009270509831248426, -0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206931, -0.02963065021785413]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-5.510910596163089e-18, -0.03]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [0.00469303395120692, -0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248417, -0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186399, -0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774185, -0.024270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02121320343559642, -0.02121320343559643]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02427050983124842, -0.0176335575687742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651034, -0.01361971499218641]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, -0.009270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02963065021785413, -0.004693033951206933]}]}], 'basis_primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Circle2D', 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.03, 'angle': 6.283185307179586, 'utd_geo_points': False, 'points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.029630650217854132, 0.004693033951206926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, 0.009270509831248422]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651038, 0.013619714992186402]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.024270509831248423, 0.017633557568774192]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021213203435596427, 0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774192, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186404, 0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248424, 0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0046930339512069276, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.8369701987210296e-18, 0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206924, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.00927050983124842, 0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0136197149921864, 0.026730195725651038]}, {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01763355756877419, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021213203435596423, 0.021213203435596427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02427050983124842, 0.017633557568774196]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651034, 0.013619714992186406]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.028531695488854605, 0.009270509831248426]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02963065021785413, 0.004693033951206929]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03, 3.673940397442059e-18]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.029630650217854132, -0.0046930339512069215]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02853169548885461, -0.009270509831248419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651038, -0.0136197149921864]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.024270509831248427, -0.01763355756877419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02121320343559643, -0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017633557568774196, -0.02427050983124842]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.013619714992186407, -0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009270509831248426, -0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206931, -0.02963065021785413]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-5.510910596163089e-18, -0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.00469303395120692, -0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248417, -0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186399, -0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774185, -0.024270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02121320343559642, -0.02121320343559643]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02427050983124842, -0.0176335575687742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651034, -0.01361971499218641]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, -0.009270509831248427]}, 
{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02963065021785413, -0.004693033951206933]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.029630650217854132, 0.004693033951206926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, 0.009270509831248422]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651038, 0.013619714992186402]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.024270509831248423, 0.017633557568774192]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021213203435596427, 0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774192, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186404, 0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248424, 0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0046930339512069276, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.8369701987210296e-18, 0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206924, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.00927050983124842, 0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0136197149921864, 0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01763355756877419, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021213203435596423, 0.021213203435596427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02427050983124842, 0.017633557568774196]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651034, 0.013619714992186406]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.028531695488854605, 0.009270509831248426]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02963065021785413, 0.004693033951206929]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03, 3.673940397442059e-18]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.029630650217854132, -0.0046930339512069215]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02853169548885461, -0.009270509831248419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[-0.026730195725651038, -0.0136197149921864]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.024270509831248427, -0.01763355756877419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02121320343559643, -0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017633557568774196, -0.02427050983124842]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.013619714992186407, -0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009270509831248426, -0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206931, -0.02963065021785413]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-5.510910596163089e-18, -0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.00469303395120692, -0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248417, -0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186399, -0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774185, -0.024270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02121320343559642, -0.02121320343559643]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02427050983124842, -0.0176335575687742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651034, -0.01361971499218641]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, -0.009270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02963065021785413, -0.004693033951206933]}]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.029630650217854132, 0.004693033951206926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, 0.009270509831248422]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651038, 0.013619714992186402]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.024270509831248423, 0.017633557568774192]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021213203435596427, 0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774192, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186404, 0.026730195725651034]}, {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248424, 0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0046930339512069276, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.8369701987210296e-18, 0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206924, 0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.00927050983124842, 0.02853169548885461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0136197149921864, 0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01763355756877419, 0.024270509831248423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021213203435596423, 0.021213203435596427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02427050983124842, 0.017633557568774196]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651034, 0.013619714992186406]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.028531695488854605, 0.009270509831248426]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02963065021785413, 0.004693033951206929]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03, 3.673940397442059e-18]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.029630650217854132, -0.0046930339512069215]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02853169548885461, -0.009270509831248419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.026730195725651038, -0.0136197149921864]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.024270509831248427, -0.01763355756877419]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02121320343559643, -0.021213203435596423]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017633557568774196, -0.02427050983124842]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.013619714992186407, -0.026730195725651034]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009270509831248426, -0.028531695488854605]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.004693033951206931, -0.02963065021785413]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-5.510910596163089e-18, -0.03]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.00469303395120692, -0.029630650217854132]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.009270509831248417, -0.02853169548885461]}, {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.013619714992186399, -0.026730195725651038]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017633557568774185, -0.024270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02121320343559642, -0.02121320343559643]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02427050983124842, -0.0176335575687742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026730195725651034, -0.01361971499218641]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.028531695488854605, -0.009270509831248427]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02963065021785413, -0.004693033951206933]}]})
rotor_external_contour = vm.Contour2D.dict_to_object({'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Contour2D', 'primitives': [{'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, 0.05]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, 0.05]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, -0.05]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, -0.05]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, -0.05599999999999999]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, -0.05599999999999999]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.049999999999999996, -0.056]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.049999999999999996, -0.056]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05599999999999999, -0.05000000000000001]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05599999999999999, -0.05000000000000001]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.056, 0.049999999999999996]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.056, 0.049999999999999996]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}], 'basis_primitives': [{'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, 
0.05]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, 0.05]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, -0.05]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, -0.05]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, -0.05599999999999999]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, -0.05599999999999999]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.049999999999999996, -0.056]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.049999999999999996, -0.056]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05599999999999999, -0.05000000000000001]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05599999999999999, -0.05000000000000001]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.056, 0.049999999999999996]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.056, 0.049999999999999996]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, 0.05]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.055999999999999994, -0.05]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, -0.05599999999999999]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.049999999999999996, -0.056]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05599999999999999, -0.05000000000000001]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.056, 0.049999999999999996]}]})
rotor_magnet_contour = vm.Contour2D.dict_to_object({'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Contour2D', 'primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0, 0.095]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.08077747210701756]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.08077747210701756]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [4.336808689942018e-18, -4.573879611178527e-17]}, 'radius': 0.09500000000000004, 'is_trigo': False, 'angle1': 1.0165344923425688, 'angle2': 2.1250581612472246, 'angle': 1.1085236689046558}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.08077747210701756]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.08077747210701756]}, 'object_class': 'volmdlr.core.LineSegment2D'}], 'basis_primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0, 0.095]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.08077747210701756]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.08077747210701756]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [4.336808689942018e-18, -4.573879611178527e-17]}, 'radius': 0.09500000000000004, 'is_trigo': False, 'angle1': 1.0165344923425688, 'angle2': 2.1250581612472246, 'angle': 1.1085236689046558}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.08077747210701756]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}, 'point2': {'name': 
'', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.08077747210701756]}, 'object_class': 'volmdlr.core.LineSegment2D'}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.08077747210701756]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03663579417895213, 0.08765168900185243]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.022354752318127908, 0.09233236187163824]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.007514267348593662, 0.09470235364664313]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.007514267348593656, 0.09470235364664313]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02235475231812793, 0.09233236187163824]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03663579417895213, 0.08765168900185243]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05000000000000001, 0.08077747210701756]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05, 0.055999999999999994]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05, 0.055999999999999994]}]})
nb_poles = 4
rotor_magnet_contours = [rotor_magnet_contour,]
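# The magnet contour is duplicated around the machine by rotating copies in
# 90-degree steps about the origin, one per pole (nb_poles = 4), in the loop
# below.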
for i in range(1, nb_poles):
contour = rotor_magnet_contour.Rotation(vm.O2D, angle=i*math.pi/2, copy=True)
rotor_magnet_contours.append(contour)
stator_internal_contour = vm.Contour2D.dict_to_object({'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Contour2D', 'primitives': [{'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.1]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02176493326044296, 0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02176493326044296, 0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, 0.17027918542728054]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0, 0.17400000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, 0.17027918542728054]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173426, 0.17027918542728054]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.17400000000000002, 'is_trigo': False, 'angle1': 1.3636218017252826, 'angle2': 1.7779708518645108, 'angle': 0.4143490501392282}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173426, 0.17027918542728054]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02176493326044296, 0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02176493326044296, 0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.1]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01622264624736139, 0.09918077307993205]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.1]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215561, 0.09160254037844388]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[-2.6733053435393526e-17, -2.030577005775927e-16]}, 'radius': 0.10049875621120913, 'is_trigo': False, 'angle1': 1.1468662036877606, 'angle2': 1.4711276743037345, 'angle': 0.3242614706159739}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215561, 0.09160254037844388]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215561, 0.10459292143521046]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215561, 0.10459292143521046]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04011752395993525, 0.11301545629335558]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04011752395993525, 0.11301545629335558]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05414363720866572, 0.16536162357033266]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.087, 0.15068842025849236]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05414363720866572, 0.16536162357033266]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1161355482186148, 0.12957057706115924]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [3.2867877316145964e-16, 6.177564192484373e-16]}, 'radius': 0.17399999999999932, 'is_trigo': False, 'angle1': 0.8400230261269829, 'angle2': 1.2543720762662127, 'angle': 0.41434905013922985}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1161355482186148, 0.12957057706115924]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0778154941903682, 0.09125052303291262]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0778154941903682, 0.09125052303291262]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784438, 0.09459292143521045]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784438, 0.09459292143521045]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784438, 0.08160254037844387]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06363961030678927, 0.07778174593052024]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784438, 
0.08160254037844387]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844386, 0.05866025403784441]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.2468035368457382e-16, -1.6248660041655238e-16]}, 'radius': 0.10049875621120911, 'is_trigo': False, 'angle1': 0.6232674280894617, 'angle2': 0.9475288987054358, 'angle': 0.3242614706159741}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844386, 0.0586602540378444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521043, 0.0661602540378444]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521043, 0.0661602540378444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0912505230329126, 0.07781549419036822]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0912505230329126, 0.07781549419036822]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1295705770611592, 0.11613554821861483]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849233, 0.08700000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1295705770611592, 0.11613554821861483]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033263, 0.054143637208665746]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [8.276436788999663e-17, 8.975862275846609e-17]}, 'radius': 0.1739999999999999, 'is_trigo': False, 'angle1': 0.31642425052868456, 'angle2': 0.730773300667913, 'angle': 0.41434905013922846}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033263, 0.054143637208665746]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335556, 0.04011752395993527]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335556, 0.04011752395993527]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521044, 0.04883974596215563]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521044, 0.04883974596215563]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, 0.04133974596215563]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 
'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09400439217788162, 0.03554116277314279]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, 0.04133974596215563]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10000000000000002, 0.010000000000000023]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-8.993521131875374e-16, -2.790353009353527e-16]}, 'radius': 0.10049875621120985, 'is_trigo': False, 'angle1': 0.0996686524911641, 'angle2': 0.4239301231071358, 'angle': 0.32426147061597166}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, 0.010000000000000007]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, 0.010000000000000007]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, 0.010000000000000007]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, 0.021764933260442966]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, 0.021764933260442966]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, 0.03579104650917344]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17400000000000002, 1.0654427152581974e-17]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, 0.03579104650917344]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, -0.03579104650917341]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.311247760626477e-16, 1.3631648839701518e-17]}, 'radius': 0.17400000000000015, 'is_trigo': False, 'angle1': -0.20717452506961392, 'angle2': 0.20717452506961392, 'angle': 0.41434905013922785}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, -0.03579104650917341]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, -0.021764933260442952]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, -0.021764933260442952]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, -0.009999999999999993]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, -0.009999999999999993]}, 'point2': {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, -0.009999999999999993]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09918077307993205, -0.01622264624736138]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, -0.009999999999999993]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844388, -0.0413397459621556]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-2.2411551149754653e-16, 6.761925212014346e-17]}, 'radius': 0.10049875621120913, 'is_trigo': False, 'angle1': -0.4239301231071364, 'angle2': -0.0996686524911624, 'angle': 0.324261470615974}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, -0.041339745962155595]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521046, -0.04883974596215559]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521046, -0.04883974596215559]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335558, -0.04011752395993523]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335558, -0.04011752395993523]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033266, -0.05414363720866569]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849236, -0.08699999999999997]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033266, -0.05414363720866569]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.12957057706115924, -0.11613554821861477]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.8515786235400805e-16, -1.4714655497584672e-16]}, 'radius': 0.1739999999999997, 'is_trigo': False, 'angle1': -0.7307733006679131, 'angle2': -0.3164242505286842, 'angle': 0.4143490501392289}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.12957057706115924, -0.11613554821861477]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09125052303291262, -0.07781549419036818]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09125052303291262, -0.07781549419036818]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[0.09459292143521045, -0.06616025403784437]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521045, -0.06616025403784437]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844389, -0.05866025403784437]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07778174593052024, -0.06363961030678926]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844389, -0.05866025403784437]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784442, -0.08160254037844386]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.0902840638587913e-16, 2.80673721336895e-16]}, 'radius': 0.10049875621120932, 'is_trigo': False, 'angle1': -0.9475288987054346, 'angle2': -0.6232674280894611, 'angle': 0.32426147061597344}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.058660254037844424, -0.08160254037844385]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784442, -0.09459292143521042]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784442, -0.09459292143521042]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07781549419036825, -0.09125052303291259]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07781549419036825, -0.09125052303291259]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861485, -0.1295705770611592]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08700000000000006, -0.1506884202584923]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861485, -0.1295705770611592]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.054143637208665794, -0.16536162357033263]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-6.573575463229242e-16, 1.2355128384968829e-15]}, 'radius': 0.1740000000000014, 'is_trigo': False, 'angle1': -1.25437207626621, 'angle2': -0.8400230261269854, 'angle': 0.41434905013922463}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.054143637208665794, -0.16536162357033263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0401175239599353, -0.11301545629335555]}, 'object_class': 
'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0401175239599353, -0.11301545629335555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215566, -0.10459292143521043]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215566, -0.10459292143521043]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215565, -0.09160254037844386]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03554116277314281, -0.09400439217788162]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215565, -0.09160254037844386]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01000000000000005, -0.10000000000000002]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.1727162454588165e-16, -6.442408349218695e-16]}, 'radius': 0.10049875621120828, 'is_trigo': False, 'angle1': -1.4711276743037347, 'angle2': -1.146866203687758, 'angle': 0.32426147061597677}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000012, -0.1]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000014, -0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000014, -0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021764933260442973, -0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021764933260442973, -0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173446, -0.17027918542728054]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.1308854305163948e-17, -0.17400000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173446, -0.17027918542728054]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173405, -0.17027918542728054]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.4234042381691588e-17, 2.331107130002626e-16]}, 'radius': 0.17400000000000024, 'is_trigo': False, 'angle1': -1.7779708518645105, 'angle2': -1.3636218017252828, 'angle': 0.41434905013922774}, {'name': '', 'point1': {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173405, -0.17027918542728054]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021764933260442945, -0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021764933260442945, -0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999986, -0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999986, -0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999988, -0.1]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.016222646247361375, -0.09918077307993205]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999988, -0.1]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.041339745962155595, -0.09160254037844388]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.1004987562112089, 'is_trigo': False, 'angle1': -1.9947264499020332, 'angle2': -1.6704649792860584, 'angle': 0.32426147061597477}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215559, -0.0916025403784439]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04883974596215558, -0.10459292143521046]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04883974596215558, -0.10459292143521046]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993522, -0.11301545629335559]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993522, -0.11301545629335559]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05414363720866568, -0.16536162357033268]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08699999999999997, -0.15068842025849236]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05414363720866568, -0.16536162357033268]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861477, -0.12957057706115926]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[3.70653365839329e-16, 5.935223768667494e-16]}, 'radius': 0.1740000000000007, 'is_trigo': False, 'angle1': -2.3015696274628086, 'angle2': -1.887220577323582, 'angle': 0.4143490501392266}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861477, -0.12957057706115926]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036818, -0.09125052303291263]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036818, -0.09125052303291263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784435, -0.09459292143521048]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784435, -0.09459292143521048]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784436, -0.08160254037844389]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06363961030678925, -0.07778174593052026]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784436, -0.08160254037844389]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844385, -0.05866025403784443]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.1004987562112089, 'is_trigo': False, 'angle1': -2.5183252255003317, 'angle2': -2.194063754884357, 'angle': 0.32426147061597455}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844383, -0.05866025403784443]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09459292143521042, -0.06616025403784444]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09459292143521042, -0.06616025403784444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291256, -0.07781549419036826]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291256, -0.07781549419036826]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115915, -0.11613554821861487]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1506884202584923, -0.08700000000000009]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115915, 
-0.11613554821861487]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033263, -0.054143637208665815]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.3123245265337952e-16, -5.809437402731471e-18]}, 'radius': 0.1739999999999999, 'is_trigo': False, 'angle1': -2.8251684030611077, 'angle2': -2.4108193529218793, 'angle': 0.4143490501392284}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033263, -0.054143637208665815]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11301545629335555, -0.04011752395993532]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11301545629335555, -0.04011752395993532]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521043, -0.048839745962155665]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521043, -0.048839745962155665]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844384, -0.04133974596215566]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09400439217788162, -0.035541162773142815]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844384, -0.04133974596215566]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, -0.010000000000000064]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.6669209775476033e-15, -3.9129794287779357e-16]}, 'radius': 0.10049875621120721, 'is_trigo': False, 'angle1': -3.0419240010986326, 'angle2': -2.7176625304826527, 'angle': 0.3242614706159799}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, -0.01000000000000002]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, -0.010000000000000021]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, -0.010000000000000021]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, -0.02176493326044298]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, -0.02176493326044298]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, -0.03579104650917346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 
'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17400000000000002, -3.196328145774592e-17]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, -0.03579104650917346]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, 0.03579104650917339]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.1854129343774616e-16, -2.2719414732835864e-17]}, 'radius': 0.17400000000000024, 'is_trigo': False, 'angle1': 2.9344181285201794, 'angle2': -2.9344181285201794, 'angle': 0.4143490501392275}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, 0.03579104650917339]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, 0.021764933260442938]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, 0.021764933260442938]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, 0.00999999999999998]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, 0.00999999999999998]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, 0.009999999999999981]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09918077307993205, 0.01622264624736137]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, 0.009999999999999981]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0916025403784439, 0.041339745962155595]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [5.149715067397098e-16, -8.630974361485694e-17]}, 'radius': 0.10049875621120943, 'is_trigo': False, 'angle1': 2.717662530482658, 'angle2': 3.0419240010986313, 'angle': 0.3242614706159732}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0916025403784439, 0.04133974596215555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521048, 0.04883974596215554]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521048, 0.04883974596215554]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1130154562933556, 0.040117523959935175]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1130154562933556, 0.040117523959935175]}, 'point2': {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033268, 0.05414363720866561]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1506884202584924, 0.0869999999999999]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033268, 0.05414363720866561]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115932, 0.11613554821861471]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.8976277816168484e-16, 1.3754265283906457e-16]}, 'radius': 0.1739999999999998, 'is_trigo': False, 'angle1': 2.410819352921881, 'angle2': 2.8251684030611095, 'angle': 0.4143490501392284}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115932, 0.11613554821861471]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291267, 0.07781549419036814]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291267, 0.07781549419036814]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0945929214352105, 0.06616025403784431]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0945929214352105, 0.06616025403784431]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844392, 0.05866025403784432]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0777817459305203, 0.06363961030678922]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844392, 0.05866025403784432]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784446, 0.08160254037844383]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.6279046601097631e-15, 1.6846140302077266e-15]}, 'radius': 0.1004987562112066, 'is_trigo': False, 'angle1': 2.194063754884355, 'angle2': 2.5183252255003374, 'angle': 0.32426147061598254}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784443, 0.08160254037844383]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784444, 0.09459292143521042]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784444, 0.09459292143521042]}, 'point2': {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036826, 0.09125052303291256]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036826, 0.09125052303291256]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861487, 0.12957057706115915]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08700000000000009, 0.1506884202584923]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861487, 0.12957057706115915]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.054143637208665815, 0.16536162357033263]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.9113612032239482e-16, 4.279936410867517e-16]}, 'radius': 0.17399999999999957, 'is_trigo': False, 'angle1': 1.8872205773235815, 'angle2': 2.301569627462811, 'angle': 0.4143490501392293}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.054143637208665815, 0.16536162357033263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993532, 0.11301545629335555]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993532, 0.11301545629335555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.048839745962155665, 0.10459292143521043]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.048839745962155665, 0.10459292143521043]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215566, 0.09160254037844384]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035541162773142815, 0.09400439217788162]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215566, 0.09160254037844384]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.010000000000000064, 0.1]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.9129794287779357e-16, 1.6669209775476033e-15]}, 'radius': 0.10049875621120721, 'is_trigo': False, 'angle1': 1.670464979286057, 'angle2': 1.9947264499020372, 'angle': 0.3242614706159801}], 'basis_primitives': [{'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.1]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.115]}, 'object_class': 
'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02176493326044296, 0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02176493326044296, 0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, 0.17027918542728054]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0, 0.17400000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, 0.17027918542728054]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173426, 0.17027918542728054]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.17400000000000002, 'is_trigo': False, 'angle1': 1.3636218017252826, 'angle2': 1.7779708518645108, 'angle': 0.4143490501392282}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173426, 0.17027918542728054]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02176493326044296, 0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02176493326044296, 0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.1]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01622264624736139, 0.09918077307993205]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.1]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215561, 0.09160254037844388]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-2.6733053435393526e-17, -2.030577005775927e-16]}, 'radius': 0.10049875621120913, 'is_trigo': False, 'angle1': 1.1468662036877606, 'angle2': 1.4711276743037345, 'angle': 0.3242614706159739}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215561, 0.09160254037844388]}, 'point2': {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215561, 0.10459292143521046]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215561, 0.10459292143521046]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04011752395993525, 0.11301545629335558]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04011752395993525, 0.11301545629335558]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05414363720866572, 0.16536162357033266]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.087, 0.15068842025849236]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05414363720866572, 0.16536162357033266]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1161355482186148, 0.12957057706115924]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [3.2867877316145964e-16, 6.177564192484373e-16]}, 'radius': 0.17399999999999932, 'is_trigo': False, 'angle1': 0.8400230261269829, 'angle2': 1.2543720762662127, 'angle': 0.41434905013922985}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1161355482186148, 0.12957057706115924]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0778154941903682, 0.09125052303291262]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0778154941903682, 0.09125052303291262]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784438, 0.09459292143521045]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784438, 0.09459292143521045]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784438, 0.08160254037844387]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06363961030678927, 0.07778174593052024]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784438, 0.08160254037844387]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844386, 0.05866025403784441]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.2468035368457382e-16, -1.6248660041655238e-16]}, 'radius': 0.10049875621120911, 'is_trigo': False, 'angle1': 0.6232674280894617, 'angle2': 
0.9475288987054358, 'angle': 0.3242614706159741}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844386, 0.0586602540378444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521043, 0.0661602540378444]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521043, 0.0661602540378444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0912505230329126, 0.07781549419036822]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0912505230329126, 0.07781549419036822]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1295705770611592, 0.11613554821861483]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849233, 0.08700000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1295705770611592, 0.11613554821861483]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033263, 0.054143637208665746]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [8.276436788999663e-17, 8.975862275846609e-17]}, 'radius': 0.1739999999999999, 'is_trigo': False, 'angle1': 0.31642425052868456, 'angle2': 0.730773300667913, 'angle': 0.41434905013922846}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033263, 0.054143637208665746]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335556, 0.04011752395993527]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335556, 0.04011752395993527]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521044, 0.04883974596215563]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521044, 0.04883974596215563]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, 0.04133974596215563]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09400439217788162, 0.03554116277314279]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, 0.04133974596215563]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[0.10000000000000002, 0.010000000000000023]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-8.993521131875374e-16, -2.790353009353527e-16]}, 'radius': 0.10049875621120985, 'is_trigo': False, 'angle1': 0.0996686524911641, 'angle2': 0.4239301231071358, 'angle': 0.32426147061597166}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, 0.010000000000000007]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, 0.010000000000000007]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, 0.010000000000000007]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, 0.021764933260442966]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, 0.021764933260442966]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, 0.03579104650917344]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17400000000000002, 1.0654427152581974e-17]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, 0.03579104650917344]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, -0.03579104650917341]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.311247760626477e-16, 1.3631648839701518e-17]}, 'radius': 0.17400000000000015, 'is_trigo': False, 'angle1': -0.20717452506961392, 'angle2': 0.20717452506961392, 'angle': 0.41434905013922785}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, -0.03579104650917341]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, -0.021764933260442952]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, -0.021764933260442952]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, -0.009999999999999993]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, -0.009999999999999993]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, -0.009999999999999993]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09918077307993205, -0.01622264624736138]}, 'start': {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, -0.009999999999999993]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844388, -0.0413397459621556]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-2.2411551149754653e-16, 6.761925212014346e-17]}, 'radius': 0.10049875621120913, 'is_trigo': False, 'angle1': -0.4239301231071364, 'angle2': -0.0996686524911624, 'angle': 0.324261470615974}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, -0.041339745962155595]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521046, -0.04883974596215559]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521046, -0.04883974596215559]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335558, -0.04011752395993523]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335558, -0.04011752395993523]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033266, -0.05414363720866569]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849236, -0.08699999999999997]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033266, -0.05414363720866569]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.12957057706115924, -0.11613554821861477]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.8515786235400805e-16, -1.4714655497584672e-16]}, 'radius': 0.1739999999999997, 'is_trigo': False, 'angle1': -0.7307733006679131, 'angle2': -0.3164242505286842, 'angle': 0.4143490501392289}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.12957057706115924, -0.11613554821861477]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09125052303291262, -0.07781549419036818]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09125052303291262, -0.07781549419036818]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521045, -0.06616025403784437]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521045, -0.06616025403784437]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844389, -0.05866025403784437]}, 
'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07778174593052024, -0.06363961030678926]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844389, -0.05866025403784437]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784442, -0.08160254037844386]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.0902840638587913e-16, 2.80673721336895e-16]}, 'radius': 0.10049875621120932, 'is_trigo': False, 'angle1': -0.9475288987054346, 'angle2': -0.6232674280894611, 'angle': 0.32426147061597344}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.058660254037844424, -0.08160254037844385]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784442, -0.09459292143521042]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784442, -0.09459292143521042]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07781549419036825, -0.09125052303291259]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07781549419036825, -0.09125052303291259]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861485, -0.1295705770611592]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08700000000000006, -0.1506884202584923]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861485, -0.1295705770611592]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.054143637208665794, -0.16536162357033263]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-6.573575463229242e-16, 1.2355128384968829e-15]}, 'radius': 0.1740000000000014, 'is_trigo': False, 'angle1': -1.25437207626621, 'angle2': -0.8400230261269854, 'angle': 0.41434905013922463}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.054143637208665794, -0.16536162357033263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0401175239599353, -0.11301545629335555]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0401175239599353, -0.11301545629335555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215566, -0.10459292143521043]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 
'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215566, -0.10459292143521043]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215565, -0.09160254037844386]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03554116277314281, -0.09400439217788162]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215565, -0.09160254037844386]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01000000000000005, -0.10000000000000002]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.1727162454588165e-16, -6.442408349218695e-16]}, 'radius': 0.10049875621120828, 'is_trigo': False, 'angle1': -1.4711276743037347, 'angle2': -1.146866203687758, 'angle': 0.32426147061597677}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000012, -0.1]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000014, -0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000014, -0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021764933260442973, -0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021764933260442973, -0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173446, -0.17027918542728054]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.1308854305163948e-17, -0.17400000000000002]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173446, -0.17027918542728054]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173405, -0.17027918542728054]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.4234042381691588e-17, 2.331107130002626e-16]}, 'radius': 0.17400000000000024, 'is_trigo': False, 'angle1': -1.7779708518645105, 'angle2': -1.3636218017252828, 'angle': 0.41434905013922774}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173405, -0.17027918542728054]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021764933260442945, -0.11793301815030346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.021764933260442945, -0.11793301815030346]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999986, -0.115]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999986, -0.115]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999988, -0.1]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.016222646247361375, -0.09918077307993205]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999988, -0.1]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.041339745962155595, -0.09160254037844388]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.1004987562112089, 'is_trigo': False, 'angle1': -1.9947264499020332, 'angle2': -1.6704649792860584, 'angle': 0.32426147061597477}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215559, -0.0916025403784439]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04883974596215558, -0.10459292143521046]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04883974596215558, -0.10459292143521046]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993522, -0.11301545629335559]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993522, -0.11301545629335559]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05414363720866568, -0.16536162357033268]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08699999999999997, -0.15068842025849236]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05414363720866568, -0.16536162357033268]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861477, -0.12957057706115926]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [3.70653365839329e-16, 5.935223768667494e-16]}, 'radius': 0.1740000000000007, 'is_trigo': False, 'angle1': -2.3015696274628086, 'angle2': -1.887220577323582, 'angle': 0.4143490501392266}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861477, -0.12957057706115926]}, 'point2': {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036818, -0.09125052303291263]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036818, -0.09125052303291263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784435, -0.09459292143521048]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784435, -0.09459292143521048]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784436, -0.08160254037844389]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06363961030678925, -0.07778174593052026]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784436, -0.08160254037844389]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844385, -0.05866025403784443]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.1004987562112089, 'is_trigo': False, 'angle1': -2.5183252255003317, 'angle2': -2.194063754884357, 'angle': 0.32426147061597455}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844383, -0.05866025403784443]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09459292143521042, -0.06616025403784444]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09459292143521042, -0.06616025403784444]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291256, -0.07781549419036826]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291256, -0.07781549419036826]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115915, -0.11613554821861487]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1506884202584923, -0.08700000000000009]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115915, -0.11613554821861487]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033263, -0.054143637208665815]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.3123245265337952e-16, -5.809437402731471e-18]}, 'radius': 0.1739999999999999, 'is_trigo': False, 'angle1': 
-2.8251684030611077, 'angle2': -2.4108193529218793, 'angle': 0.4143490501392284}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033263, -0.054143637208665815]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11301545629335555, -0.04011752395993532]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11301545629335555, -0.04011752395993532]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521043, -0.048839745962155665]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521043, -0.048839745962155665]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844384, -0.04133974596215566]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09400439217788162, -0.035541162773142815]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844384, -0.04133974596215566]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, -0.010000000000000064]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.6669209775476033e-15, -3.9129794287779357e-16]}, 'radius': 0.10049875621120721, 'is_trigo': False, 'angle1': -3.0419240010986326, 'angle2': -2.7176625304826527, 'angle': 0.3242614706159799}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, -0.01000000000000002]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, -0.010000000000000021]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, -0.010000000000000021]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, -0.02176493326044298]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, -0.02176493326044298]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, -0.03579104650917346]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17400000000000002, -3.196328145774592e-17]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, -0.03579104650917346]}, 'end': {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, 0.03579104650917339]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [2.1854129343774616e-16, -2.2719414732835864e-17]}, 'radius': 0.17400000000000024, 'is_trigo': False, 'angle1': 2.9344181285201794, 'angle2': -2.9344181285201794, 'angle': 0.4143490501392275}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, 0.03579104650917339]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, 0.021764933260442938]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, 0.021764933260442938]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, 0.00999999999999998]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, 0.00999999999999998]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, 0.009999999999999981]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09918077307993205, 0.01622264624736137]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, 0.009999999999999981]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0916025403784439, 0.041339745962155595]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [5.149715067397098e-16, -8.630974361485694e-17]}, 'radius': 0.10049875621120943, 'is_trigo': False, 'angle1': 2.717662530482658, 'angle2': 3.0419240010986313, 'angle': 0.3242614706159732}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0916025403784439, 0.04133974596215555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521048, 0.04883974596215554]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521048, 0.04883974596215554]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1130154562933556, 0.040117523959935175]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1130154562933556, 0.040117523959935175]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033268, 0.05414363720866561]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[-0.1506884202584924, 0.0869999999999999]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033268, 0.05414363720866561]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115932, 0.11613554821861471]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.8976277816168484e-16, 1.3754265283906457e-16]}, 'radius': 0.1739999999999998, 'is_trigo': False, 'angle1': 2.410819352921881, 'angle2': 2.8251684030611095, 'angle': 0.4143490501392284}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115932, 0.11613554821861471]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291267, 0.07781549419036814]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291267, 0.07781549419036814]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0945929214352105, 0.06616025403784431]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0945929214352105, 0.06616025403784431]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844392, 0.05866025403784432]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0777817459305203, 0.06363961030678922]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844392, 0.05866025403784432]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784446, 0.08160254037844383]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.6279046601097631e-15, 1.6846140302077266e-15]}, 'radius': 0.1004987562112066, 'is_trigo': False, 'angle1': 2.194063754884355, 'angle2': 2.5183252255003374, 'angle': 0.32426147061598254}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784443, 0.08160254037844383]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784444, 0.09459292143521042]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784444, 0.09459292143521042]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036826, 0.09125052303291256]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036826, 0.09125052303291256]}, 'point2': {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861487, 0.12957057706115915]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08700000000000009, 0.1506884202584923]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861487, 0.12957057706115915]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.054143637208665815, 0.16536162357033263]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-1.9113612032239482e-16, 4.279936410867517e-16]}, 'radius': 0.17399999999999957, 'is_trigo': False, 'angle1': 1.8872205773235815, 'angle2': 2.301569627462811, 'angle': 0.4143490501392293}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.054143637208665815, 0.16536162357033263]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993532, 0.11301545629335555]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993532, 0.11301545629335555]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.048839745962155665, 0.10459292143521043]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'point1': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.048839745962155665, 0.10459292143521043]}, 'point2': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215566, 0.09160254037844384]}, 'object_class': 'volmdlr.core.LineSegment2D'}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Arc2D', 'interior': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035541162773142815, 0.09400439217788162]}, 'start': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215566, 0.09160254037844384]}, 'end': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.010000000000000064, 0.1]}, 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.9129794287779357e-16, 1.6669209775476033e-15]}, 'radius': 0.10049875621120721, 'is_trigo': False, 'angle1': 1.670464979286057, 'angle2': 1.9947264499020372, 'angle': 0.3242614706159801}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.1]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01, 0.115]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02176493326044296, 0.11793301815030346]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, 0.17027918542728054]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': 
[-0.017991966758156102, 0.17306729654147085]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [6.938893903907228e-18, 0.17400000000000002]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0179919667581561, 0.17306729654147085]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03579104650917344, 0.17027918542728054]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.02176493326044296, 0.11793301815030346]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.115]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.01, 0.1]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.018064820892167755, 0.09886183412284996]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026010992116576107, 0.0970743441343356]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03378632325399181, 0.09464927026119521]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215559, 0.0916025403784439]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215561, 0.10459292143521046]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04011752395993525, 0.11301545629335558]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05414363720866572, 0.16536162357033266]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0709521479941271, 0.15887665874828655]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08700000000000001, 0.15068842025849236]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10211514854734377, 0.14088469199013043]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861483, 0.1295705770611592]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0778154941903682, 0.09125052303291262]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784438, 0.09459292143521045]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784438, 0.08160254037844387]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06507551086885813, 0.07658444936902745]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07106335201775947, 0.07106335201775948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07658444936902745, 0.06507551086885814]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844387, 0.05866025403784441]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521043, 0.0661602540378444]}, {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0912505230329126, 0.07781549419036822]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1295705770611592, 0.11613554821861483]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14088469199013043, 0.10211514854734378]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849233, 0.08700000000000002]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15887665874828652, 0.07095214799412711]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033263, 0.054143637208665746]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335556, 0.04011752395993527]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521044, 0.04883974596215563]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844387, 0.04133974596215563]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0946492702611952, 0.033786323253991835]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09707434413433559, 0.026010992116576134]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09886183412284996, 0.018064820892167783]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10000000000000002, 0.010000000000000024]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, 0.010000000000000007]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, 0.021764933260442966]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, 0.03579104650917344]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17306729654147085, 0.01799196675815612]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17400000000000004, 2.0570542743608748e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17306729654147085, -0.01799196675815609]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17027918542728054, -0.035791046509173405]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11793301815030346, -0.021764933260442952]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.115, -0.009999999999999993]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1, -0.009999999999999993]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09886183412284996, -0.018064820892167755]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0970743441343356, -0.02601099211657611]}, {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0946492702611952, -0.033786323253991814]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09160254037844388, -0.0413397459621556]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.10459292143521046, -0.04883974596215559]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11301545629335558, -0.04011752395993523]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16536162357033266, -0.05414363720866569]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15887665874828655, -0.07095214799412707]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.15068842025849236, -0.08699999999999998]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14088469199013046, -0.10211514854734373]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.12957057706115926, -0.11613554821861477]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09125052303291262, -0.07781549419036818]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09459292143521045, -0.06616025403784437]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08160254037844389, -0.05866025403784437]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07658444936902747, -0.06507551086885811]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0710633520177595, -0.07106335201775946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06507551086885816, -0.07658444936902742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.05866025403784442, -0.08160254037844386]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06616025403784442, -0.09459292143521042]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07781549419036825, -0.09125052303291259]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11613554821861485, -0.1295705770611592]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1021151485473438, -0.1408846919901304]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.08700000000000006, -0.1506884202584923]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.07095214799412715, -0.1588766587482865]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.054143637208665794, -0.1653616235703326]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0401175239599353, -0.11301545629335555]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04883974596215566, -0.10459292143521043]}, {'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.04133974596215565, -0.09160254037844386]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.03378632325399187, -0.09464927026119521]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.026010992116576166, -0.0970743441343356]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.018064820892167818, -0.09886183412284996]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000054, -0.10000000000000002]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.010000000000000014, -0.115]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.021764933260442973, -0.11793301815030346]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.035791046509173446, -0.17027918542728054]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.017991966758156123, -0.17306729654147085]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.0356254573877131e-17, -0.17400000000000002]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.017991966758156095, -0.17306729654147085]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.035791046509173426, -0.17027918542728054]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.021764933260442945, -0.11793301815030346]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999986, -0.115]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.009999999999999988, -0.1]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01806482089216775, -0.09886183412284996]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0260109921165761, -0.0970743441343356]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03378632325399181, -0.09464927026119521]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.041339745962155595, -0.09160254037844388]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04883974596215558, -0.10459292143521046]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993522, -0.11301545629335559]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05414363720866568, -0.16536162357033268]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07095214799412704, -0.15887665874828655]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08699999999999995, -0.15068842025849236]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10211514854734371, -0.14088469199013046]}, {'name': 
'', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861478, -0.12957057706115926]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036818, -0.09125052303291263]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784435, -0.09459292143521048]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784436, -0.08160254037844389]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0650755108688581, -0.07658444936902747]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07106335201775944, -0.07106335201775951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07658444936902742, -0.06507551086885817]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844383, -0.05866025403784444]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09459292143521042, -0.06616025403784444]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291256, -0.07781549419036826]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115915, -0.11613554821861487]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14088469199013037, -0.10211514854734384]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1506884202584923, -0.08700000000000008]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.15887665874828652, -0.07095214799412719]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033263, -0.05414363720866582]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11301545629335555, -0.04011752395993532]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521043, -0.048839745962155665]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844384, -0.04133974596215566]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0946492702611952, -0.03378632325399189]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09707434413433559, -0.026010992116576193]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09886183412284996, -0.018064820892167856]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, -0.010000000000000096]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, -0.010000000000000021]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, -0.02176493326044298]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728054, 
-0.03579104650917346]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17306729654147085, -0.017991966758156147]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17400000000000002, -5.047499034846478e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17306729654147088, 0.01799196675815606]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17027918542728057, 0.03579104650917337]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11793301815030346, 0.021764933260442938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.115, 0.00999999999999998]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1, 0.009999999999999981]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09886183412284995, 0.018064820892167748]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0970743441343356, 0.0260109921165761]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09464927026119521, 0.03378632325399182]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09160254037844388, 0.041339745962155616]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10459292143521048, 0.04883974596215554]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1130154562933556, 0.040117523959935175]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16536162357033268, 0.05414363720866561]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.15887665874828658, 0.07095214799412697]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1506884202584924, 0.08699999999999988]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14088469199013057, 0.10211514854734363]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.12957057706115935, 0.11613554821861469]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09125052303291267, 0.07781549419036814]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0945929214352105, 0.06616025403784431]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08160254037844392, 0.05866025403784432]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07658444936902752, 0.06507551086885807]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07106335201775955, 0.07106335201775944]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.0650755108688582, 0.07658444936902742]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.05866025403784445, 
0.08160254037844383]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06616025403784444, 0.09459292143521042]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07781549419036826, 0.09125052303291256]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11613554821861487, 0.12957057706115915]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.10211514854734384, 0.1408846919901304]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.08700000000000009, 0.1506884202584923]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.07095214799412718, 0.15887665874828652]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.054143637208665815, 0.16536162357033263]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04011752395993532, 0.11301545629335555]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.048839745962155665, 0.10459292143521043]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.04133974596215566, 0.09160254037844384]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.03378632325399188, 0.0946492702611952]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.02601099211657618, 0.0970743441343356]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.01806482089216784, 0.09886183412284996]}]}
)
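# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original geometry definition). The
# serialized contours in this script are plain nested dicts produced by the
# objects' to_dict() serialization, with every 2D point stored as
# {'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [x, y]}.
# The sketch below only relies on that dict structure (no volmdlr calls);
# the function name is hypothetical and can be removed without affecting
# the model built above.
def _contour_bounding_box(contour_dict):
    """Return (xmin, ymin, xmax, ymax) over every Point2D found in a serialized contour dict."""
    xs, ys = [], []

    def _collect(node):
        # Recursively walk dicts/lists and harvest the 'vector' of each Point2D.
        if isinstance(node, dict):
            if node.get('object_class') == 'volmdlr.core_compiled.Point2D':
                x, y = node['vector']
                xs.append(x)
                ys.append(y)
            for value in node.values():
                _collect(value)
        elif isinstance(node, list):
            for item in node:
                _collect(item)

    _collect(contour_dict)
    return min(xs), min(ys), max(xs), max(ys)
# Usage sketch: calling _contour_bounding_box on the dict deserialized just
# above should give roughly (-0.174, -0.174, 0.174, 0.174), matching the
# 0.174 m outer radius of the slot arcs in that contour.
# ---------------------------------------------------------------------------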
stator_external_contour = vm.Contour2D.dict_to_object({'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Contour2D', 'primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Circle2D', 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.2, 'angle': 6.283185307179586, 'utd_geo_points': False, 'points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.2, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902757, 0.03128689300804618]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, 0.06180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1782013048376736, 0.09079809994790936]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1618033988749895, 0.11755705045849463]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730953, 0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849463, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790937, 0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498949, 0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046185, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.2246467991473533e-17, 0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046164, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06180339887498947, 0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790934, 0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849461, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1414213562373095, 0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16180339887498948, 0.11755705045849466]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17820130483767357, 0.09079809994790938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1902113032590307, 0.06180339887498951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902754, 0.0312868930080462]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.2, 2.4492935982947065e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902757, -0.03128689300804615]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.19021130325903074, -0.06180339887498946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1782013048376736, -0.09079809994790934]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1618033988749895, -0.11755705045849461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14142135623730953, -0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849466, -0.16180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790938, -0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.061803398874989514, -0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046206, -0.19753766811902754]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.6739403974420595e-17, -0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046136, -0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498945, -0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790933, -0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849459, -0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730948, -0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16180339887498948, -0.11755705045849468]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17820130483767357, -0.0907980999479094]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, -0.06180339887498953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902754, -0.03128689300804622]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.2, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902757, 0.03128689300804618]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, 0.06180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1782013048376736, 0.09079809994790936]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1618033988749895, 0.11755705045849463]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730953, 0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849463, 0.1618033988749895]}, {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790937, 0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498949, 0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046185, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.2246467991473533e-17, 0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046164, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06180339887498947, 0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790934, 0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849461, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1414213562373095, 0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16180339887498948, 0.11755705045849466]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17820130483767357, 0.09079809994790938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1902113032590307, 0.06180339887498951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902754, 0.0312868930080462]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.2, 2.4492935982947065e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902757, -0.03128689300804615]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19021130325903074, -0.06180339887498946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1782013048376736, -0.09079809994790934]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1618033988749895, -0.11755705045849461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14142135623730953, -0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849466, -0.16180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790938, -0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.061803398874989514, -0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046206, -0.19753766811902754]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.6739403974420595e-17, -0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046136, -0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498945, -0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790933, -0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849459, -0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730948, -0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16180339887498948, -0.11755705045849468]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17820130483767357, -0.0907980999479094]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, -0.06180339887498953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902754, -0.03128689300804622]}]}], 'basis_primitives': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core.Circle2D', 'center': {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.0, 0.0]}, 'radius': 0.2, 'angle': 6.283185307179586, 'utd_geo_points': False, 'points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.2, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902757, 0.03128689300804618]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, 0.06180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1782013048376736, 0.09079809994790936]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1618033988749895, 0.11755705045849463]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730953, 0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849463, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790937, 0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498949, 0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046185, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.2246467991473533e-17, 0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046164, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06180339887498947, 0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790934, 0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849461, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.1414213562373095, 0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16180339887498948, 0.11755705045849466]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17820130483767357, 0.09079809994790938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1902113032590307, 0.06180339887498951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902754, 0.0312868930080462]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.2, 2.4492935982947065e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902757, -0.03128689300804615]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19021130325903074, -0.06180339887498946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1782013048376736, -0.09079809994790934]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1618033988749895, -0.11755705045849461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14142135623730953, -0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849466, -0.16180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790938, -0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.061803398874989514, -0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046206, -0.19753766811902754]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.6739403974420595e-17, -0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046136, -0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498945, -0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790933, -0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849459, -0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730948, -0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16180339887498948, -0.11755705045849468]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17820130483767357, -0.0907980999479094]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, -0.06180339887498953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902754, -0.03128689300804622]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 
'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.2, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902757, 0.03128689300804618]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, 0.06180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1782013048376736, 0.09079809994790936]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1618033988749895, 0.11755705045849463]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730953, 0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849463, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790937, 0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498949, 0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046185, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.2246467991473533e-17, 0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046164, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06180339887498947, 0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790934, 0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849461, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1414213562373095, 0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16180339887498948, 0.11755705045849466]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17820130483767357, 0.09079809994790938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1902113032590307, 0.06180339887498951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902754, 0.0312868930080462]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.2, 2.4492935982947065e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902757, -0.03128689300804615]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19021130325903074, -0.06180339887498946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1782013048376736, -0.09079809994790934]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1618033988749895, -0.11755705045849461]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [-0.14142135623730953, -0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849466, -0.16180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790938, -0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.061803398874989514, -0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046206, -0.19753766811902754]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.6739403974420595e-17, -0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046136, -0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498945, -0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790933, -0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849459, -0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730948, -0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16180339887498948, -0.11755705045849468]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17820130483767357, -0.0907980999479094]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, -0.06180339887498953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902754, -0.03128689300804622]}]}], '_utd_analysis': False, 'tessel_points': [{'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.2, 0.0]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902757, 0.03128689300804618]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, 0.06180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1782013048376736, 0.09079809994790936]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1618033988749895, 0.11755705045849463]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730953, 0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849463, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790937, 0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498949, 0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046185, 0.19753766811902757]}, {'name': '', 'package_version': None, 
'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [1.2246467991473533e-17, 0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046164, 0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.06180339887498947, 0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790934, 0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849461, 0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1414213562373095, 0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.16180339887498948, 0.11755705045849466]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.17820130483767357, 0.09079809994790938]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1902113032590307, 0.06180339887498951]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902754, 0.0312868930080462]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.2, 2.4492935982947065e-17]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19753766811902757, -0.03128689300804615]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.19021130325903074, -0.06180339887498946]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1782013048376736, -0.09079809994790934]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.1618033988749895, -0.11755705045849461]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.14142135623730953, -0.1414213562373095]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.11755705045849466, -0.16180339887498948]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.09079809994790938, -0.17820130483767357]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.061803398874989514, -0.1902113032590307]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-0.031286893008046206, -0.19753766811902754]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [-3.6739403974420595e-17, -0.2]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.031286893008046136, -0.19753766811902757]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.06180339887498945, -0.19021130325903074]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.09079809994790933, -0.1782013048376736]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.11755705045849459, -0.1618033988749895]}, {'name': '', 'package_version': None, 'object_class': 
'volmdlr.core_compiled.Point2D', 'vector': [0.14142135623730948, -0.14142135623730953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.16180339887498948, -0.11755705045849468]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.17820130483767357, -0.0907980999479094]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.1902113032590307, -0.06180339887498953]}, {'name': '', 'package_version': None, 'object_class': 'volmdlr.core_compiled.Point2D', 'vector': [0.19753766811902754, -0.03128689300804622]}]}
)
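# The stator external contour above is rebuilt from its serialised dict; it
# wraps a single Circle2D primitive, which is extracted and plotted below.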
stator_external_contour=stator_external_contour.primitives[0]
stator_external_contour.MPLPlot()
# circle=vm.Circle2D(vm.Point2D([0.4,0.5]),0.3)
# ax=circle.MPLPlot()
# circle.discretise(40,ax)
# print(list(combinations(range(3),3)))
# all_triangle_elements=rotor.mesh()
# offset=rotor_external_contour.polygon.Offset(-0.025)
# offset.MPLPlot()
rotor_magnet=[rotor_magnet_contours[0]]
exterior_rotor_contour=rotor_magnet_contours+[rotor_external_contour]
all_rotor=[rotor_internal_contour,rotor_external_contour]
rotor_external=[rotor_external_contour]
rotor_internal=[rotor_internal_contour]
stator_internal=[stator_internal_contour]
stator_external=[stator_external_contour]
# polygon=offset.select_reapaired_polygon([])
# polygon.MPLPlot()
ax=rotor_external_contour.MPLPlot()
# rotor=vmmesh.Mesher(rotor_magnet,[],40)
# all_rotor=[rotor_internal_contour,rotor_external_contour]
exterior_rotor_contour=rotor_magnet_contours+[rotor_external_contour]
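# Mesh the rotor from its internal contour plus the magnet and external
# contours. The numeric arguments to Mesher/generate_mesh are taken here as
# the script's chosen discretisation settings (their exact meaning is assumed).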
rotor_mesh=vmmesh.Mesher(rotor_internal,exterior_rotor_contour,[],60)
all_rotor_triangle_elements=rotor_mesh.generate_mesh(6,False)
ax=stator_external_contour.MPLPlot()
print(isinstance(stator_external_contour,vm.Circle2D))
stator_internal_contour.MPLPlot(ax=ax)
stator_mesh=vmmesh.Mesher(stator_internal,stator_external,[],20)
all_stator_triangle_elements=stator_mesh.generate_mesh(None,True)
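# Extract the repeating slot pattern from the stator internal contour,
# replicate it around the contour's centre of mass, then mesh the replicated
# patterns and wrap the resulting triangles in an ElementsGroup/Mesh.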
stator_internal_contour.MPLPlot()
stator_internal_contour.get_pattern().MPLPlot()
pattern=stator_internal_contour.get_pattern()
# pattern.Rotation(pattern.CenterOfMass(),math.pi).MPLPlot()
all_patterns=pattern.contour_from_pattern(stator_internal_contour.CenterOfMass())
print(len(all_patterns))
ax=plt.subplot()
for p in all_patterns:
p.MPLPlot()
# pattern=stator_internal_contour.get_pattern()
# pattern.Rotation(pattern.CenterOfMass(),math.pi).MPLPlot()
all_patterns=stator_internal_contour.contour_from_pattern()
pattern_mesh=vmmesh.Mesher([],all_patterns,[],40)
pattern_tri=pattern_mesh.generate_mesh(None,False)
elements_group_0=vmmesh.ElementsGroup(pattern_tri,'first_elements_group')
mesh=vmmesh.Mesh([elements_group_0])
mesh.plot()
# ax=rotor_external_contour.MPLPlot()
# offset=rotor_external_contour.polygon.Offset(-0.02)
# print(len(offset.points))
# offset.MPLPlot()
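# Offset the rotor external polygon inwards by half of its bounding
# rectangle's minimum length, then repair the offset polygon before plotting.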
rec=rotor_external_contour.polygon.bounding_rectangle()
ax=rotor_external_contour.polygon.MPLPlot()
offset=rotor_external_contour.polygon.Offset(-rec.min_length()/2)
offset.MPLPlot(ax=ax)
offset.select_reapaired_polygon([]).MPLPlot(ax=ax)
# offset.MPLPlot(ax=ax)
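# Small hand-built geometry checks: a triangle area, line discretisation and
# a simple six-point contour that is meshed directly.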
p1 = vm.Point2D([2,2])
p2 = vm.Point2D([3,3])
p3 = vm.Point2D([3,4])
p4 = vm.Point2D([2.8,4])
p5 = vm.Point2D([1.9,5])
p6 = vm.Point2D([1,4])
p7=vm.Point2D([1.45,4.5])
triangle=vm.Triangle2D([p1,p2,p3])
print(triangle.area)
print(triangle.Area())
# a1=vm.Arc2D(vm.Point2D([0.1,0.1]),vm.Point2D([0.2,0.6]),vm.Point2D([0.8,0.1]))
# ax=a1.MPLPlot()
# a1.mesh_arc(40,ax)
# print(a1.mesh_arc(40,ax))
ax=plt.subplot()
l1 = vm.LineSegment2D(p1,p2)
l1.discretise(1,ax)
# l2 = vm.LineSegment2D(p2,p3)
# l3=vm.LineSegment2D(p3,p4)
# l4 = vm.LineSegment2D(p4,p5)
# l5 = vm.LineSegment2D(p5,p1)
l1 = vm.LineSegment2D(p1,p2)
l2 = vm.LineSegment2D(p2,p3)
l3=vm.LineSegment2D(p3,p4)
l4 = vm.LineSegment2D(p4,p5)
l5 = vm.LineSegment2D(p5,p6)
l6 = vm.LineSegment2D(p6,p1)
contour=vm.Contour2D([l1,l2,l3,l4,l5,l6])
mesher=vmmesh.Mesher([contour],[],1.5)
all_triangle_elements=mesher.mesh(None)
# mesher=vmmesh.Mesher([contour],[],1.5)
# all_triangle_elements=mesher.mesh()
# element_group=vmmesh.ElementsGroup(all_triangle_elements,'element_group')
# mesh=vmmesh.Mesh([element_group])
# mesh.plot()
# element_group=vmmesh.ElementsGroup(all_triangle_elements,'element_group')
# mesh=vmmesh.Mesh([element_group])
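# Build a deliberately self-intersecting polygon from the corners of a square
# and the intersection of its diagonals, then split it back into simple
# polygons with repair_intersections.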
p1 = vm.Point2D([1,1])
p2 = vm.Point2D([3,1])
p3 = vm.Point2D([3,3])
p4 = vm.Point2D([1,3])
l1 = vm.LineSegment2D(p1,p2)
l2 = vm.LineSegment2D(p1,p3)
l3=vm.LineSegment2D(p4,p2)
l4 = vm.LineSegment2D(p3,p4)
inter=l2.line_intersection(l3)
polygon=vm.Polygon2D([p1,p2,p4,p3,inter,p1])
polygon.MPLPlot()
polygons=polygon.repair_intersections([])
for polygon in polygons:
polygon.MPLPlot()
stator_internal_contour.polygon.MPLPlot()
|
gpl-3.0
|
bsipocz/astropy
|
examples/coordinates/plot_obs-planning.py
|
1
|
6338
|
# -*- coding: utf-8 -*-
"""
===================================================================
Determining and plotting the altitude/azimuth of a celestial object
===================================================================
This example demonstrates coordinate transformations and the creation of
visibility curves to assist with observing run planning.
In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33.
The altitude-azimuth coordinates are then found using
`astropy.coordinates.EarthLocation` and `astropy.time.Time` objects.
This example is meant to demonstrate the capabilities of the
`astropy.coordinates` package. For more convenient and/or complex observation
planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_
package.
-------------------
*By: Erik Tollerud, Kelle Cruz*
*License: BSD*
-------------------
"""
##############################################################################
# Let's suppose you are planning to visit picturesque Bear Mountain State Park
# in New York, USA. You're bringing your telescope with you (of course), and
# someone told you M33 is a great target to observe there. You happen to know
# you're free at 11:00 pm local time, and you want to know if it will be up.
# Astropy can answer that.
#
# Import numpy and matplotlib. For the latter, use a nicer set of plot
# parameters and set up support for plotting/converting quantities.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style, quantity_support
plt.style.use(astropy_mpl_style)
quantity_support()
##############################################################################
# Import the packages necessary for finding coordinates and making
# coordinate transformations
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
##############################################################################
# `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object
# names and retrieve coordinates.
#
# Get the coordinates of M33:
m33 = SkyCoord.from_name('M33')
##############################################################################
# Use `astropy.coordinates.EarthLocation` to provide the location of Bear
# Mountain and set the time to 11pm EDT on 2012 July 12:
bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2012-7-12 23:00:00') - utcoffset
##############################################################################
# `astropy.coordinates.EarthLocation.get_site_names` and
# `~astropy.coordinates.EarthLocation.of_site` can be used to get locations
# of major observatories.
#
# Use `astropy.coordinates` to find the Alt, Az coordinates of M33 as
# observed from Bear Mountain at 11pm on 2012 July 12.
m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain))
print("M33's Altitude = {0.alt:.2}".format(m33altaz))
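##############################################################################
# As a hedged aside (not part of the original example): instead of typing in
# coordinates, `~astropy.coordinates.EarthLocation.of_site` can look up a
# named observatory from astropy's site registry. The site name below is only
# an illustration and the lookup may need a network connection, so any failure
# is ignored and the example continues unchanged.
try:
    kitt_peak = EarthLocation.of_site('Kitt Peak')
    print("Kitt Peak is at height {0:.0f}".format(kitt_peak.height))
except Exception:
    pass  # offline or unrecognised site name; skip the lookup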
##############################################################################
# This is helpful since it turns out M33 is barely above the horizon at this
# time. It's more informative to find M33's airmass over the course of
# the night.
#
# Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm
# and 7am EDT:
midnight = Time('2012-7-13 00:00:00') - utcoffset
delta_midnight = np.linspace(-2, 10, 100)*u.hour
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
m33altazs_July13night = m33.transform_to(frame_July13night)
##############################################################################
# convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute:
m33airmasss_July13night = m33altazs_July13night.secz
##############################################################################
# Plot the airmass as a function of time:
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
##############################################################################
# Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000
# evenly spaced times between noon on July 12 and noon on July 13:
from astropy.coordinates import get_sun
delta_midnight = np.linspace(-12, 12, 1000)*u.hour
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13)
##############################################################################
# Do the same with `~astropy.coordinates.get_moon` to find when the moon is
# up. Be aware that this will need to download a 10MB file from the internet
# to get a precise location of the moon.
from astropy.coordinates import get_moon
moon_July12_to_13 = get_moon(times_July12_to_13)
moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13)
##############################################################################
# Find the alt,az coordinates of M33 at those same times:
m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13)
##############################################################################
# Make a beautiful figure illustrating nighttime and the altitudes of M33,
# the Sun and the Moon over that time:
plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun')
plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon')
plt.scatter(delta_midnight, m33altazs_July12_to_13.alt,
c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8,
cmap='viridis')
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12*u.hour, 12*u.hour)
plt.xticks((np.arange(13)*2-12)*u.hour)
plt.ylim(0*u.deg, 90*u.deg)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
|
bsd-3-clause
|
marianotepper/sgft
|
sgft/test_temperatures.py
|
1
|
3814
|
from __future__ import absolute_import
import graph_tool.draw as gt_draw
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn.apionly as sns
import numpy as np
import pickle
import os
from scipy.spatial.distance import pdist, squareform
import sgft.temperatures as temperatures
import sgft.graph_spectrogram as spec
import sgft.utils as utils
def show_spectrogram(s, cmap):
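    # Normalise each column of the spectrogram to its maximum, then apply a
    # smoothstep to compress the dynamic range before plotting.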
s /= np.atleast_2d(np.max(s, axis=0))
s_range = np.max(s) - np.min(s)
s = utils.smoothstep(s,
min_edge=np.min(s) + s_range / 3,
max_edge=np.max(s) - s_range / 10)
spec.plot(s, cmap)
def process_us():
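    # Build the 2014 US temperature-station network, compute (or load a
    # cached) PageRank-based graph spectrogram of the station values, and save
    # the spectrogram, network and window figures.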
name = 'US'
year = 2014
n_neigh = 6
    graph = temperatures.country_network(name, year, n_neigh=n_neigh)
pos = graph.vertex_properties['pos']
station_values = graph.vertex_properties['station_values']
weight = graph.edge_properties['weights']
x_signal = station_values.a
n_eigs = 500
# n_eigs = graph.num_vertices() - 1
alpha = -1e-3
file_name = '{0}_{1}_k{2}'
file_name = file_name.format(name, year, n_neigh)
if os.path.exists(file_name + '_spec.pickle'):
        with open(file_name + '_spec.pickle', 'rb') as f:
spec_weighted = pickle.load(f)
factory = pickle.load(f)
else:
factory = spec.PageRankSGFT(graph, n_eigs, alpha, weight=weight)
spec_weighted = factory.compute(x_signal)
        with open(file_name + '_spec.pickle', 'wb') as f:
pickle.dump(spec_weighted, f)
pickle.dump(factory, f)
palette = sns.cubehelix_palette(256, start=2, rot=0, dark=0.15, light=1)
cmap = colors.ListedColormap(palette)
plt.figure()
show_spectrogram(spec_weighted[0:30, :], cmap=cmap)
plt.savefig(file_name + '_spec.pdf', dpi=300)
temperatures.plot(graph, weight, pos, station_values, file_name)
spec.show_window(factory, .5 * (graph.num_vertices() + 1), weight, pos,
file_name + '_window1.png')
spec.show_window(factory, .25 * (graph.num_vertices() + 1), weight, pos,
file_name + '_window2.png')
spec.show_window(factory, .75 * (graph.num_vertices() + 1), weight, pos,
file_name + '_window3.png')
show_spec_in_graph(graph, 417, spec_weighted, pos, weight,
file_name + '_florida')
def show_spec_in_graph(graph, vertex, spec, pos, weight, file_name):
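    # Compare every vertex's spectrogram column to the chosen vertex via
    # cosine similarity, stem-plot the similarities, and draw the graph with
    # vertices coloured by that similarity.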
dist = 1.0 - squareform(pdist(spec.T, 'cosine'))
plt.figure()
plt.stem(dist[vertex, :], markerfmt=' ')
rim = graph.new_vertex_property('vector<double>')
rim.set_2d_array(np.array([0, 0, 0, 1]))
rim[graph.vertex(vertex)] = [0.8941176471, 0.1019607843, 0.1098039216, 1]
rim_width = graph.new_vertex_property('float', vals=0.5)
rim_width.a[vertex] = 2
shape = graph.new_vertex_property('int', vals=0)
shape[graph.vertex(vertex)] = 2
size = graph.new_vertex_property('double', vals=10)
size.a[vertex] = 15
correlation = graph.new_vertex_property('double', vals=2)
correlation.a = dist[vertex, :]
vorder = graph.new_vertex_property('int', vals=0)
vorder.a[vertex] = 1
palette = sns.cubehelix_palette(256)
cmap = colors.ListedColormap(palette)
gt_draw.graph_draw(graph, pos=pos, vertex_color=rim, vorder=vorder,
vertex_pen_width=rim_width,
vertex_shape=shape, vertex_fill_color=correlation,
vcmap=cmap, vertex_size=size, edge_color=[0, 0, 0, 0.7],
edge_pen_width=weight, output=file_name + '.png',
output_size=(1200, 1200))
plt.figure()
utils.plot_colorbar(cmap, np.arange(0, 1.01, 0.2), file_name)
if __name__ == '__main__':
process_us()
plt.show()
|
mit
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/pandas/tseries/tests/test_timezones.py
|
7
|
66322
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta, tzinfo, date
import nose
import numpy as np
import pytz
from distutils.version import LooseVersion
from pandas.types.dtypes import DatetimeTZDtype
from pandas import (Index, Series, DataFrame, isnull, Timestamp)
from pandas import DatetimeIndex, to_datetime, NaT
from pandas import tslib
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
from pytz import NonExistentTimeError
import pandas.util.testing as tm
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
set_timezone)
from pandas.compat import lrange, zip
try:
import pytz # noqa
except ImportError:
pass
try:
import dateutil
except ImportError:
pass
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, '-07:00')
fixed_off_no_name = FixedOffset(-330, None)
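# Two fixed-offset tzinfo instances reused throughout the tests: UTC-07:00
# with a name and UTC-05:30 without one.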
class TestTimeZoneSupportPytz(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
tm._skip_if_no_pytz()
def tz(self, tz):
# Construct a timezone object from a string. Overridden in subclass to
# parameterize tests.
return pytz.timezone(tz)
def tzstr(self, tz):
# Construct a timezone string from a string. Overridden in subclass to
# parameterize tests.
return tz
def localize(self, tz, x):
return tz.localize(x)
def cmptz(self, tz1, tz2):
# Compare two timezones. Overridden in subclass to parameterize
# tests.
return tz1.zone == tz2.zone
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# Values are unmodified
self.assertTrue(np.array_equal(rng.asi8, rng_eastern.asi8))
self.assertTrue(self.cmptz(rng_eastern.tz, self.tz('US/Eastern')))
def test_utc_to_local_no_modify_explicit(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tz('US/Eastern'))
# Values are unmodified
self.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
self.assertEqual(rng_eastern.tz, self.tz('US/Eastern'))
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tzstr('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
self.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
self.assertRaises(NonExistentTimeError, rng.tz_localize,
self.tzstr('US/Eastern'))
def test_localize_utc_conversion_explicit(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tz('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
self.assertTrue(np.array_equal(converted.asi8, expected_naive.asi8))
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
self.assertRaises(NonExistentTimeError, rng.tz_localize,
self.tz('US/Eastern'))
def test_timestamp_tz_localize(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern'))
self.assertEqual(result.hour, expected.hour)
self.assertEqual(result, expected)
def test_timestamp_tz_localize_explicit(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tz('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern'))
self.assertEqual(result.hour, expected.hour)
self.assertEqual(result, expected)
def test_timestamp_constructed_by_date_and_tz(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern'))
self.assertEqual(result.hour, expected.hour)
self.assertEqual(result, expected)
def test_timestamp_constructed_by_date_and_tz_explicit(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern'))
self.assertEqual(result.hour, expected.hour)
self.assertEqual(result, expected)
def test_timestamp_to_datetime_tzoffset(self):
# tzoffset
from dateutil.tz import tzoffset
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
self.assertEqual(expected, result)
def test_timedelta_push_over_dst_boundary(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
self.assertEqual(result, expected)
def test_timedelta_push_over_dst_boundary_explicit(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tz('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern'))
self.assertEqual(result, expected)
def test_tz_localize_dti(self):
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize(self.tzstr('US/Eastern'))
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L', tz='utc')
self.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(self.tzstr('US/Pacific'))
self.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',
freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
self.assertRaises(pytz.NonExistentTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
def test_tz_localize_empty_series(self):
# #2248
ts = Series()
ts2 = ts.tz_localize('utc')
self.assertTrue(ts2.index.tz == pytz.utc)
ts2 = ts.tz_localize(self.tzstr('US/Eastern'))
self.assertTrue(self.cmptz(ts2.index.tz, self.tz('US/Eastern')))
def test_astimezone(self):
utc = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utc.tz_convert(self.tzstr('US/Eastern'))
result = utc.astimezone(self.tzstr('US/Eastern'))
self.assertEqual(expected, result)
tm.assertIsInstance(result, Timestamp)
def test_create_with_tz(self):
stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
self.assertEqual(stamp.hour, 5)
rng = date_range('3/11/2012 04:00', periods=10, freq='H',
tz=self.tzstr('US/Eastern'))
self.assertEqual(stamp, rng[1])
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
self.assertIs(utc_stamp.tzinfo, pytz.utc)
self.assertEqual(utc_stamp.hour, 5)
stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
        self.assertEqual(stamp.hour, 5)
def test_create_with_fixed_tz(self):
off = FixedOffset(420, '+07:00')
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
self.assertEqual(off, rng.tz)
rng2 = date_range(start, periods=len(rng), tz=off)
self.assert_index_equal(rng, rng2)
rng3 = date_range('3/11/2012 05:00:00+07:00',
'6/11/2012 05:00:00+07:00')
self.assertTrue((rng.values == rng3.values).all())
def test_create_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
self.assertEqual(off, rng.tz)
idx = Index([start, end])
self.assertEqual(off, idx.tz)
def test_date_range_localize(self):
rng = date_range('3/11/2012 03:00', periods=15, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],
tz='US/Eastern')
rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')
rng3 = rng3.tz_localize('US/Eastern')
self.assert_index_equal(rng, rng3)
# DST transition time
val = rng[0]
exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')
self.assertEqual(val.hour, 3)
self.assertEqual(exp.hour, 3)
self.assertEqual(val, exp) # same UTC value
self.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range('3/11/2012 00:00', periods=2, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],
tz='US/Eastern')
self.assert_index_equal(rng, rng2)
exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')
self.assertEqual(exp.hour, 0)
self.assertEqual(rng[0], exp)
exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')
self.assertEqual(exp.hour, 1)
self.assertEqual(rng[1], exp)
rng = date_range('3/11/2012 00:00', periods=10, freq='H',
tz='US/Eastern')
self.assertEqual(rng[2].hour, 3)
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
tz = self.tz('US/Eastern')
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
self.assertEqual(stamp, expected)
self.assertEqual(stamp.tzinfo, expected.tzinfo)
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# test not valid for dateutil timezones.
# self.assertIn('EDT', repr(rng_eastern[0].tzinfo))
self.assertTrue('EDT' in repr(rng_eastern[0].tzinfo) or 'tzfile' in
repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
conv = idx[0].tz_convert(self.tzstr('US/Pacific'))
expected = idx.tz_convert(self.tzstr('US/Pacific'))[0]
self.assertEqual(conv, expected)
def test_pass_dates_localize_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(self.tzstr('US/Eastern'))
fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
self.assertEqual(conv.tz, fromdates.tz)
self.assert_numpy_array_equal(conv.values, fromdates.values)
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
self.assertTrue((rng.hour == 0).all())
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz=self.tzstr('America/Atikokan'))
expected = np.arange(10, dtype=np.int32)
self.assert_numpy_array_equal(dr.hour, expected)
def test_with_tz(self):
tz = self.tz('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=offsets.Hour())
self.assertIs(dr.tz, pytz.utc)
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
self.assertIs(central.tz, tz)
comp = self.localize(tz, central[0].to_pydatetime().replace(
tzinfo=None)).tzinfo
self.assertIs(central[0].tz, comp)
# compare vs a localized tz
comp = self.localize(tz,
dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo
self.assertIs(central[0].tz, comp)
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
self.assertRaises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',
tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
self.assert_index_equal(dr_utc, localized)
def test_with_tz_ambiguous_times(self):
tz = self.tz('US/Eastern')
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=offsets.Hour())
self.assertRaises(pytz.NonExistentTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=offsets.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=offsets.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=offsets.Minute(30), tz=pytz.utc)
def test_ambiguous_infer(self):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
tz = self.tz('US/Eastern')
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='infer')
self.assert_index_equal(dr, localized)
with tm.assert_produces_warning(FutureWarning):
localized_old = di.tz_localize(tz, infer_dst=True)
self.assert_index_equal(dr, localized_old)
self.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous='infer'))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous='infer')
self.assert_index_equal(localized, localized_infer)
with tm.assert_produces_warning(FutureWarning):
localized_infer_old = dr.tz_localize(tz, infer_dst=True)
self.assert_index_equal(localized, localized_infer_old)
def test_ambiguous_flags(self):
# November 6, 2011, fall back, repeat 2 AM hour
tz = self.tz('US/Eastern')
# Pass in flags to determine right dst transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
self.assert_index_equal(dr, localized)
self.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
self.assert_index_equal(dr, localized)
localized = di.tz_localize(tz,
ambiguous=np.array(is_dst).astype('bool'))
self.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
self.assert_index_equal(dr, localized)
# Test duplicate times where infer_dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
self.assertRaises(Exception, di.tz_localize, tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
self.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
self.assert_index_equal(localized, localized_is_dst)
# construction with an ambiguous end-point
# GH 11626
tz = self.tzstr("Europe/London")
def f():
date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London", freq="H")
self.assertRaises(pytz.AmbiguousTimeError, f)
times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H",
tz=tz, ambiguous='infer')
self.assertEqual(times[0], Timestamp('2013-10-26 23:00', tz=tz,
freq="H"))
if dateutil.__version__ != LooseVersion('2.6.0'):
# GH 14621
self.assertEqual(times[-1], Timestamp('2013-10-27 01:00', tz=tz,
freq="H"))
def test_ambiguous_nat(self):
tz = self.tz('US/Eastern')
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='NaT')
times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00',
'11/06/2011 03:00']
di_test = DatetimeIndex(times, tz='US/Eastern')
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
self.assert_numpy_array_equal(di_test.values, localized.values)
def test_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# gh-14402
t = Timestamp('2015-11-01 01:00:03')
expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
def f():
t.tz_localize('US/Central')
self.assertRaises(pytz.AmbiguousTimeError, f)
result = t.tz_localize('US/Central', ambiguous=True)
self.assertEqual(result, expected0)
result = t.tz_localize('US/Central', ambiguous=False)
self.assertEqual(result, expected1)
s = Series([t])
expected0 = Series([expected0])
expected1 = Series([expected1])
def f():
s.dt.tz_localize('US/Central')
self.assertRaises(pytz.AmbiguousTimeError, f)
result = s.dt.tz_localize('US/Central', ambiguous=True)
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=[True])
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=False)
assert_series_equal(result, expected1)
result = s.dt.tz_localize('US/Central', ambiguous=[False])
assert_series_equal(result, expected1)
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00']
index = DatetimeIndex(times)
tz = 'US/Eastern'
self.assertRaises(NonExistentTimeError,
index.tz_localize, tz=tz)
self.assertRaises(NonExistentTimeError,
index.tz_localize, tz=tz, errors='raise')
result = index.tz_localize(tz=tz, errors='coerce')
test_times = ['2015-03-08 01:00-05:00', 'NaT',
'2015-03-08 03:00-04:00']
expected = DatetimeIndex(test_times)\
.tz_localize('UTC').tz_convert('US/Eastern')
tm.assert_index_equal(result, expected)
# test utility methods
def test_infer_tz(self):
eastern = self.tz('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = self.localize(eastern, _start)
end = self.localize(eastern, _end)
assert (tools._infer_tzinfo(start, end) is self.localize(
eastern, _start).tzinfo)
assert (tools._infer_tzinfo(start, None) is self.localize(
eastern, _start).tzinfo)
assert (tools._infer_tzinfo(None, end) is self.localize(eastern,
_end).tzinfo)
start = utc.localize(_start)
end = utc.localize(_end)
assert (tools._infer_tzinfo(start, end) is utc)
end = self.localize(eastern, _end)
self.assertRaises(Exception, tools._infer_tzinfo, start, end)
self.assertRaises(Exception, tools._infer_tzinfo, end, start)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10,
tz=self.tzstr('US/Eastern'))
expected = date_range('1/1/2000', periods=10, tz=self.tz('US/Eastern'))
self.assert_index_equal(result, expected)
def test_take_dont_lose_meta(self):
tm._skip_if_no_pytz()
rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern'))
result = rng.take(lrange(5))
self.assertEqual(result.tz, rng.tz)
self.assertEqual(result.freq, rng.freq)
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_localize(self.tzstr('US/Eastern'))
rng_repr = repr(rng_eastern)
self.assertIn('2010-04-13 00:00:00', rng_repr)
def test_index_astype_asobject_tzinfos(self):
# #1345
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern'))
objs = rng.asobject
for i, x in enumerate(objs):
exval = rng[i]
self.assertEqual(x, exval)
self.assertEqual(x.tzinfo, exval.tzinfo)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
self.assertEqual(x, exval)
self.assertEqual(x.tzinfo, exval.tzinfo)
def test_localized_at_time_between_time(self):
from datetime import time
rng = date_range('4/16/2012', '5/1/2012', freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(self.tzstr('US/Eastern'))
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
assert_series_equal(result, expected)
self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))
def test_string_index_alias_tz_aware(self):
rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern'))
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts['1/3/2000']
self.assertAlmostEqual(result, ts[2])
def test_fixed_offset(self):
dates = [datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)]
result = to_datetime(dates)
self.assertEqual(result.tz, fixed_off)
def test_fixedtz_topydatetime(self):
dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)])
result = to_datetime(dates).to_pydatetime()
self.assert_numpy_array_equal(dates, result)
result = to_datetime(dates)._mpl_repr()
self.assert_numpy_array_equal(dates, result)
def test_convert_tz_aware_datetime_datetime(self):
# #1581
tz = self.tz('US/Eastern')
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)]
dates_aware = [self.localize(tz, x) for x in dates]
result = to_datetime(dates_aware)
self.assertTrue(self.cmptz(result.tz, self.tz('US/Eastern')))
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
self.assert_numpy_array_equal(converted.asi8, ex_vals)
self.assertIs(converted.tz, pytz.utc)
def test_to_datetime_utc(self):
from dateutil.parser import parse
arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object)
result = to_datetime(arr, utc=True)
self.assertIs(result.tz, pytz.utc)
def test_to_datetime_tzlocal(self):
from dateutil.parser import parse
from dateutil.tz import tzlocal
dt = parse('2012-06-13T01:39:00Z')
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
self.assertIs(result.tz, pytz.utc)
rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
self.assertIs(result.tz, pytz.utc)
def test_frame_no_datetime64_dtype(self):
# after 7822
# these retain the timezones on dict construction
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)
self.assertEqual(e['B'].dtype, tz_expected)
# GH 2810 (with timezones)
datetimes_naive = [ts.to_pydatetime() for ts in dr]
datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
df = DataFrame({'dr': dr,
'dr_tz': dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz': datetimes_with_tz})
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong')
# it works!
dr.hour
def test_tz_convert_unsorted(self):
dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')
dr = dr.tz_convert(self.tzstr('US/Eastern'))
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
def test_shift_localized(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
result = dr_tz.shift(1, '10T')
self.assertEqual(result.tz, dr_tz.tz)
def test_tz_aware_asfreq(self):
dr = date_range('2011-12-01', '2012-07-20', freq='D',
tz=self.tzstr('US/Eastern'))
s = Series(np.random.randn(len(dr)), index=dr)
# it works!
s.asfreq('T')
def test_static_tzinfo(self):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST'))
index.hour
index[0]
def test_tzaware_datetime_to_index(self):
d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))]
index = DatetimeIndex(d)
self.assertTrue(self.cmptz(index.tz, self.tz('US/Eastern')))
def test_date_range_span_dst_transition(self):
# #1778
# Standard -> Daylight Savings Time
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
self.assertTrue((dr.hour == 0).all())
dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern'))
self.assertTrue((dr.hour == 0).all())
def test_convert_datetime_list(self):
dr = date_range('2012-06-02', periods=10,
tz=self.tzstr('US/Eastern'), name='foo')
dr2 = DatetimeIndex(list(dr), name='foo')
self.assert_index_equal(dr, dr2)
self.assertEqual(dr.tz, dr2.tz)
self.assertEqual(dr2.name, 'foo')
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_reset_index(self):
dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern'))
df = DataFrame(np.random.randn(len(dr)), dr)
roundtripped = df.reset_index().set_index('index')
xp = df.index.tz
rs = roundtripped.index.tz
self.assertEqual(xp, rs)
def test_dateutil_tzoffset_support(self):
from dateutil.tz import tzoffset
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo)]
series = Series(data=values, index=index)
self.assertEqual(series.index.tz, tzinfo)
# it works! #2443
repr(series.index[0])
def test_getitem_pydatetime_tz(self):
index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00',
freq='H', tz=self.tzstr('Europe/Berlin'))
ts = Series(index=index, data=index.hour)
time_pandas = Timestamp('2012-12-24 17:00',
tz=self.tzstr('Europe/Berlin'))
time_datetime = self.localize(
self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0))
self.assertEqual(ts[time_pandas], ts[time_datetime])
def test_index_drop_dont_lose_tz(self):
# #2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
self.assertTrue(ind.tz is not None)
def test_datetimeindex_tz(self):
""" Test different DatetimeIndex constructions with timezone
Follow-up of #4229
"""
arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']
idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern'))
idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2,
tz=self.tzstr('US/Eastern'))
idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern'))
idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern'))
for other in [idx2, idx3, idx4]:
self.assert_index_equal(idx1, other)
def test_datetimeindex_tz_nat(self):
idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')),
NaT])
self.assertTrue(isnull(idx[1]))
self.assertTrue(idx[0].tzinfo is not None)
class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
_multiprocess_can_split_ = True
def setUp(self):
tm._skip_if_no_dateutil()
def tz(self, tz):
"""
Construct a dateutil timezone.
Use tslib.maybe_get_tz so that we get the tz filename right
on Windows. See #7337.
"""
return tslib.maybe_get_tz('dateutil/' + tz)
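# Illustrative note (not part of the original test flow): with this override,
# self.tz('US/Eastern') resolves through tslib.maybe_get_tz('dateutil/US/Eastern'),
# i.e. a dateutil tzfile instead of a pytz timezone, so every test inherited
# from TestTimeZoneSupportPytz is re-run against dateutil.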
def tzstr(self, tz):
""" Construct a timezone string from a string. Overridden in subclass
to parameterize tests. """
return 'dateutil/' + tz
def cmptz(self, tz1, tz2):
""" Compare two timezones. Overridden in subclass to parameterize
tests. """
return tz1 == tz2
def localize(self, tz, x):
return x.replace(tzinfo=tz)
def test_utc_with_system_utc(self):
# Skipped on win32 due to dateutil bug
tm._skip_if_windows()
from pandas.tslib import maybe_get_tz
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))
def test_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2009-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = np.array([13, 14, 13], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2009-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = np.array([9, 9, 9], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2008-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = np.array([13, 14, 13], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2008-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = np.array([9, 9, 9], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
tz = self.tzstr('US/Eastern')
# sorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2009-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = np.array([13, 14, 13], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2009-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = np.array([9, 9, 9], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2008-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = np.array([13, 14, 13], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2008-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = np.array([9, 9, 9], dtype=np.int32)
self.assert_numpy_array_equal(ut.hour, expected)
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize('UTC')
idx = idx.tz_convert('Europe/Moscow')
expected = np.repeat(np.array([3, 4, 5], dtype=np.int32),
np.array([n, n, 1]))
self.assert_numpy_array_equal(idx.hour, expected)
def test_tslib_tz_convert_dst(self):
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
# Start DST
idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,
0, 1, 3, 4, 5], dtype=np.int32),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
self.assert_numpy_array_equal(idx.hour, expected)
idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype=np.int32),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
self.assert_numpy_array_equal(idx.hour, expected)
# End DST
idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([19, 20, 21, 22, 23,
0, 1, 1, 2, 3, 4], dtype=np.int32),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
self.assert_numpy_array_equal(idx.hour, expected)
idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10], dtype=np.int32),
np.array([n, n, n, n, n, n, n, n, n,
n, n, n, 1]))
self.assert_numpy_array_equal(idx.hour, expected)
# daily
# Start DST
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
self.assert_numpy_array_equal(idx.hour,
np.array([19, 19], dtype=np.int32))
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
self.assert_numpy_array_equal(idx.hour,
np.array([5, 5], dtype=np.int32))
# End DST
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
self.assert_numpy_array_equal(idx.hour,
np.array([20, 20], dtype=np.int32))
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
self.assert_numpy_array_equal(idx.hour,
np.array([4, 4], dtype=np.int32))
def test_tzlocal(self):
# GH 13583
ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())
self.assertEqual(ts.tz, dateutil.tz.tzlocal())
self.assertTrue("tz='tzlocal()')" in repr(ts))
tz = tslib.maybe_get_tz('tzlocal()')
self.assertEqual(tz, dateutil.tz.tzlocal())
# get offset using normal datetime for test
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = offset.total_seconds() * 1000000000
self.assertEqual(ts.value + offset, Timestamp('2011-01-01').value)
def test_tz_localize_tzlocal(self):
# GH 13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start='2001-01-01', end='2001-03-01')
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
def test_tz_convert_tzlocal(self):
# GH 13583
# tz_convert doesn't change the internal values
dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC')
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
class TestTimeZoneCacheKey(tm.TestCase):
def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self):
tzs = pytz.common_timezones
for tz_name in tzs:
if tz_name == 'UTC':
# skip utc as it's a special case in dateutil
continue
tz_p = tslib.maybe_get_tz(tz_name)
tz_d = tslib.maybe_get_tz('dateutil/' + tz_name)
if tz_d is None:
# skip timezones that dateutil doesn't know about.
continue
self.assertNotEqual(tslib._p_tz_cache_key(
tz_p), tslib._p_tz_cache_key(tz_d))
class TestTimeZones(tm.TestCase):
_multiprocess_can_split_ = True
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
def setUp(self):
tm._skip_if_no_pytz()
def test_replace(self):
# GH 14621
# GH 7825
# replacing datetime components with and w/o presence of a timezone
dt = Timestamp('2016-01-01 09:00:00')
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00')
self.assertEqual(result, expected)
for tz in self.timezones:
dt = Timestamp('2016-01-01 09:00:00', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00', tz=tz)
self.assertEqual(result, expected)
# we preserve nanoseconds
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00.000000123', tz=tz)
self.assertEqual(result, expected)
# test all
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(year=2015, month=2, day=2, hour=0, minute=5,
second=5, microsecond=5, nanosecond=5)
expected = Timestamp('2015-02-02 00:05:05.000005005', tz=tz)
self.assertEqual(result, expected)
# error
def f():
dt.replace(foo=5)
self.assertRaises(ValueError, f)
def f():
dt.replace(hour=0.1)
self.assertRaises(ValueError, f)
# assert conversion to naive is the same as replacing tzinfo with None
dt = Timestamp('2013-11-03 01:59:59.999999-0400', tz='US/Eastern')
self.assertEqual(dt.tz_localize(None), dt.replace(tzinfo=None))
def test_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
tm._skip_if_no_dateutil()
tm._skip_if_no_pytz()
pytz_zone = 'Europe/London'
dateutil_zone = 'dateutil/Europe/London'
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=0))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=0))
self.assertEqual(result_pytz.value, result_dateutil.value)
self.assertEqual(result_pytz.value, 1382835600000000000)
# dateutil 2.6 buggy w.r.t. ambiguous=0
if dateutil.__version__ != LooseVersion('2.6.0'):
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self.assertEqual(result_pytz.to_pydatetime().tzname(),
result_dateutil.to_pydatetime().tzname())
self.assertEqual(str(result_pytz), str(result_dateutil))
# 1 hour difference
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=1))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=1))
self.assertEqual(result_pytz.value, result_dateutil.value)
self.assertEqual(result_pytz.value, 1382832000000000000)
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
if dateutil.__version__ > LooseVersion('2.5.3'):
# GH 14621
self.assertEqual(str(result_pytz), str(result_dateutil))
self.assertEqual(result_pytz.to_pydatetime().tzname(),
result_dateutil.to_pydatetime().tzname())
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
self.assertFalse(left.equals(right))
def test_tz_localize_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_localize('US/Pacific')
exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')
self.assert_index_equal(conv, exp)
def test_tz_localize_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx in [idx1, idx2, idx3, idx4]:
localized = idx.tz_localize(tz)
expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq,
tz=tz)
tm.assert_index_equal(localized, expected)
with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
tm.assert_index_equal(reset, idx)
self.assertTrue(reset.tzinfo is None)
def test_series_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
ts = Series(1, index=rng)
result = ts.tz_localize('utc')
self.assertEqual(result.index.tz.zone, 'UTC')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
self.assertEqual(result.index.tz.zone, 'UTC')
assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
self.assertEqual(result.columns.tz.zone, 'UTC')
assert_frame_equal(result, expected.T)
# Can't localize if already tz-aware
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
ts = Series(1, index=rng)
tm.assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize,
'US/Eastern')
def test_series_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')
ts = Series(1, index=rng)
result = ts.tz_convert('Europe/Berlin')
self.assertEqual(result.index.tz.zone, 'Europe/Berlin')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
self.assertEqual(result.index.tz.zone, 'Europe/Berlin')
assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
self.assertEqual(result.columns.tz.zone, 'Europe/Berlin')
assert_frame_equal(result, expected.T)
# can't convert tz-naive
rng = date_range('1/1/2011', periods=200, freq='D')
ts = Series(1, index=rng)
tm.assertRaisesRegexp(TypeError, "Cannot convert tz-naive",
ts.tz_convert, 'US/Eastern')
def test_tz_convert_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M',
tz='UTC')
exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D',
tz='UTC')
exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H',
tz='UTC')
exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T',
tz='UTC')
exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3),
(idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
self.assertTrue(reset.tzinfo is None)
tm.assert_index_equal(reset, converted.tz_convert(
'UTC').tz_localize(None))
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
tm.assertIsInstance(result, DatetimeIndex)
self.assertEqual(result.tz, left.tz)
result = left.join(right[:-5], how=how)
tm.assertIsInstance(result, DatetimeIndex)
self.assertEqual(result.tz.zone, 'UTC')
def test_join_aware(self):
rng = date_range('1/1/2011', periods=10, freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_utc = ts.tz_localize('utc')
self.assertRaises(Exception, ts.__add__, ts_utc)
self.assertRaises(Exception, ts_utc.__add__, ts)
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=lrange(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
self.assert_index_equal(result.index, ex_index)
self.assertTrue(result.index.tz.zone == 'US/Central')
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H",
tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H",
tz="US/Eastern")
result = rng.union(rng2)
self.assertTrue(result.tz.zone == 'UTC')
def test_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
self.assertEqual(df1.index.tz, new1.index.tz)
self.assertEqual(df2.index.tz, new2.index.tz)
# different timezones convert to UTC
# frame
df1_central = df1.tz_convert('US/Central')
new1, new2 = df1.align(df1_central)
self.assertEqual(new1.index.tz, pytz.UTC)
self.assertEqual(new2.index.tz, pytz.UTC)
# series
new1, new2 = df1[0].align(df1_central[0])
self.assertEqual(new1.index.tz, pytz.UTC)
self.assertEqual(new2.index.tz, pytz.UTC)
# combination
new1, new2 = df1.align(df1_central[0], axis=0)
self.assertEqual(new1.index.tz, pytz.UTC)
self.assertEqual(new2.index.tz, pytz.UTC)
df1[0].align(df1_central, axis=0)
self.assertEqual(new1.index.tz, pytz.UTC)
self.assertEqual(new2.index.tz, pytz.UTC)
def test_append_aware(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
utc = rng1.tz
self.assertEqual(utc, ts_result.index.tz)
# GH 7795
# different tz coerces to object dtype, not UTC
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Central')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
rng2 = date_range('8/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
ts1 = Series([1, 2, 3], index=rng1)
ts2 = Series([10, 11, 12], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00',
'2016-01-01 03:00', '2016-08-01 01:00',
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
assert_series_equal(ts_result, exp)
self.assertEqual(ts_result.index.tz, rng1.tz)
def test_append_aware_naive(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(
ts2.index.asobject)))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = lrange(100)
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(
ts2.index)))
def test_equal_join_ensure_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ts.tz_convert('Europe/Moscow')
result = ts + ts_moscow
self.assertIs(result.index.tz, pytz.utc)
result = ts_moscow + ts
self.assertIs(result.index.tz, pytz.utc)
df = DataFrame({'a': ts})
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
self.assertIs(result.index.tz, pytz.utc)
result = df_moscow + df
self.assertIs(result.index.tz, pytz.utc)
def test_arith_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
perm = np.random.permutation(100)[:90]
ts1 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('US/Eastern'))
perm = np.random.permutation(100)[:90]
ts2 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('Europe/Berlin'))
result = ts1 + ts2
uts1 = ts1.tz_convert('utc')
uts2 = ts2.tz_convert('utc')
expected = uts1 + uts2
self.assertEqual(result.index.tz, pytz.UTC)
assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng[10:90][::-1]
right = rng[20:80][::-1]
self.assertEqual(left.tz, rng.tz)
result = left.intersection(right)
self.assertEqual(result.tz, left.tz)
def test_timestamp_equality_different_timezones(self):
utc_range = date_range('1/1/2000', periods=20, tz='UTC')
eastern_range = utc_range.tz_convert('US/Eastern')
berlin_range = utc_range.tz_convert('Europe/Berlin')
for a, b, c in zip(utc_range, eastern_range, berlin_range):
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(a, c)
self.assertTrue((utc_range == eastern_range).all())
self.assertTrue((utc_range == berlin_range).all())
self.assertTrue((berlin_range == eastern_range).all())
def test_datetimeindex_tz(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
tz='US/Eastern')
rng2 = DatetimeIndex(data=rng, tz='US/Eastern')
self.assert_index_equal(rng, rng2)
def test_normalize_tz(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='US/Eastern')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='US/Eastern')
self.assert_index_equal(result, expected)
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
self.assert_index_equal(result, expected)
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
from dateutil.tz import tzlocal
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
self.assert_index_equal(result, expected)
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_normalize_tz_local(self):
# GH 13459
from dateutil.tz import tzlocal
timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata',
'Asia/Shanghai', 'Australia/Canberra']
for timezone in timezones:
with set_timezone(timezone):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz=tzlocal())
self.assert_index_equal(result, expected)
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_tzaware_offset(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + offsets.Hour(5)
self.assertEqual(dates[0] + offsets.Hour(5), offset[0])
# GH 6818
for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
'2010-11-01 07:00'], freq='H', tz=tz)
offset = dates + offsets.Hour(5)
self.assert_index_equal(offset, expected)
offset = dates + np.timedelta64(5, 'h')
self.assert_index_equal(offset, expected)
offset = dates + timedelta(hours=5)
self.assert_index_equal(offset, expected)
def test_nat(self):
# GH 5546
dates = [NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
self.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
self.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern'))
idx = idx.tz_convert('UTC')
self.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC'))
dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
self.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT]
self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx + offsets.Hour(5)
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx.tz_convert('US/Pacific')
expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT]
self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx + np.timedelta64(3, 'h')
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]
self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
wood-galaxy/FreeCAD
|
src/Mod/Plot/plotSeries/TaskPanel.py
|
26
|
17784
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Plot
from plotUtils import Paths
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
def __init__(self):
self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
self.skip = False
self.item = 0
self.plt = None
def accept(self):
return True
def reject(self):
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
self.form = form
self.retranslateUi()
self.fillStyles()
self.updateUI()
QtCore.QObject.connect(
form.items,
QtCore.SIGNAL("currentRowChanged(int)"),
self.onItem)
QtCore.QObject.connect(
form.label,
QtCore.SIGNAL("editingFinished()"),
self.onData)
QtCore.QObject.connect(
form.isLabel,
QtCore.SIGNAL("stateChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.style,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.marker,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.width,
QtCore.SIGNAL("valueChanged(double)"),
self.onData)
QtCore.QObject.connect(
form.size,
QtCore.SIGNAL("valueChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.color,
QtCore.SIGNAL("pressed()"),
self.onColor)
QtCore.QObject.connect(
form.remove,
QtCore.SIGNAL("pressed()"),
self.onRemove)
QtCore.QObject.connect(
Plot.getMdiArea(),
QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),
self.onMdiArea)
return False
def getMainWindow(self):
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise RuntimeError("No main window found")
def widget(self, class_id, name):
"""Return the selected widget.
Keyword arguments:
class_id -- Class identifier
name -- Name of the widget
"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
return form.findChild(class_id, name)
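# Usage sketch (illustrative only; mirrors the calls made elsewhere in this
# class):
#
#     combo = self.widget(QtGui.QComboBox, "lineStyle")
#     combo.setCurrentIndex(0)
#
# i.e. look up a child of the task panel form by widget class and object name.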
def retranslateUi(self):
"""Set the user interface locale strings."""
self.form.setWindowTitle(QtGui.QApplication.translate(
"plot_series",
"Configure series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"No label",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setText(
QtGui.QApplication.translate(
"plot_series",
"Remove serie",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "styleLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "markerLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Marker",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QListWidget, "items").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"List of available series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLineEdit, "label").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line title",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"If checked serie will not be considered for legend",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "lineStyle").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "markers").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QDoubleSpinBox, "lineWidth").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line width",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QSpinBox, "markerSize").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker size",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "color").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line and marker color",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Removes this serie",
None,
QtGui.QApplication.UnicodeUTF8))
def fillStyles(self):
"""Fill the style combo boxes with the availabel ones."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
# Line styles
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
string = "\'" + str(style) + "\'"
string += " (" + Line2D.lineStyles[style] + ")"
form.style.addItem(string)
# Markers
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
string = "\'" + str(marker) + "\'"
string += " (" + Line2D.markers[marker] + ")"
form.marker.addItem(string)
def onItem(self, row):
"""Executed when the selected item is modified."""
if not self.skip:
self.skip = True
self.item = row
self.updateUI()
self.skip = False
def onData(self):
"""Executed when the selected item data is modified."""
if not self.skip:
self.skip = True
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
# Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Set label
serie = Plot.series()[self.item]
if(form.isLabel.isChecked()):
serie.name = None
form.label.setEnabled(False)
else:
serie.name = form.label.text()
form.label.setEnabled(True)
# Set line style and marker
style = form.style.currentIndex()
linestyles = Line2D.lineStyles.keys()
serie.line.set_linestyle(linestyles[style])
marker = form.marker.currentIndex()
markers = Line2D.markers.keys()
serie.line.set_marker(markers[marker])
# Set line width and marker size
serie.line.set_linewidth(form.width.value())
serie.line.set_markersize(form.size.value())
plt.update()
# Regenerate series labels
self.setList()
self.skip = False
def onColor(self):
""" Executed when color pallete is requested. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.color = self.widget(QtGui.QPushButton, "color")
# Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Show widget to select color
col = QtGui.QColorDialog.getColor()
# Send color to widget and serie
if col.isValid():
serie = plt.series[self.item]
form.color.setStyleSheet(
"background-color: rgb({}, {}, {});".format(col.red(),
col.green(),
col.blue()))
serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
plt.update()
def onRemove(self):
"""Executed when the data serie must be removed."""
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
# Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Remove serie
Plot.removeSerie(self.item)
self.setList()
self.updateUI()
plt.update()
def onMdiArea(self, subWin):
"""Executed when a new window is selected on the mdi area.
Keyword arguments:
subWin -- Selected window.
"""
plt = Plot.getPlot()
if plt != subWin:
self.updateUI()
def updateUI(self):
""" Setup UI controls values if possible """
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
plt = Plot.getPlot()
form.items.setEnabled(bool(plt))
form.label.setEnabled(bool(plt))
form.isLabel.setEnabled(bool(plt))
form.style.setEnabled(bool(plt))
form.marker.setEnabled(bool(plt))
form.width.setEnabled(bool(plt))
form.size.setEnabled(bool(plt))
form.color.setEnabled(bool(plt))
form.remove.setEnabled(bool(plt))
if not plt:
self.plt = plt
form.items.clear()
return
self.skip = True
# Refill list
if self.plt != plt or len(Plot.series()) != form.items.count():
self.plt = plt
self.setList()
# Ensure that there are series
if not len(Plot.series()):
form.label.setEnabled(False)
form.isLabel.setEnabled(False)
form.style.setEnabled(False)
form.marker.setEnabled(False)
form.width.setEnabled(False)
form.size.setEnabled(False)
form.color.setEnabled(False)
form.remove.setEnabled(False)
return
# Set label
serie = Plot.series()[self.item]
if serie.name is None:
form.isLabel.setChecked(True)
form.label.setEnabled(False)
form.label.setText("")
else:
form.isLabel.setChecked(False)
form.label.setText(serie.name)
# Set line style and marker
form.style.setCurrentIndex(0)
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
if style == serie.line.get_linestyle():
form.style.setCurrentIndex(i)
form.marker.setCurrentIndex(0)
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
if marker == serie.line.get_marker():
form.marker.setCurrentIndex(i)
# Set line width and marker size
form.width.setValue(serie.line.get_linewidth())
form.size.setValue(serie.line.get_markersize())
# Set color
color = Colors.colorConverter.to_rgb(serie.line.get_color())
form.color.setStyleSheet("background-color: rgb({}, {}, {});".format(
int(color[0] * 255),
int(color[1] * 255),
int(color[2] * 255)))
self.skip = False
def setList(self):
"""Setup the UI control values if it is possible."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.items.clear()
series = Plot.series()
for i in range(0, len(series)):
serie = series[i]
string = 'serie ' + str(i) + ': '
if serie.name is None:
string = string + '\"No label\"'
else:
string = string + serie.name
form.items.addItem(string)
# Ensure that selected item is correct
if len(series) and self.item >= len(series):
self.item = len(series) - 1
form.items.setCurrentIndex(self.item)
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
|
lgpl-2.1
|
CforED/Machine-Learning
|
examples/ensemble/plot_partial_dependence.py
|
3
|
4833
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception, the size of the target feature set must be small (usually
one or two); thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from six.moves.urllib.error import HTTPError
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
def main():
# fetch California housing dataset
try:
cal_housing = fetch_california_housing()
except HTTPError:
print("Failed downloading california housing data.")
return
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
if __name__ == "__main__":
main()
|
bsd-3-clause
|
iismd17/scikit-learn
|
sklearn/utils/testing.py
|
71
|
26178
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exists on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
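# Usage sketch (illustrative only): assert both the exception type and that
# its message matches the regular expression.
#
#     assert_raises_regex(ValueError, "shapes .* not aligned",
#                         np.dot, np.ones((2, 3)), np.ones((2, 3)))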
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
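# Usage sketch (illustrative only; ``_old_api`` is a hypothetical helper):
#
#     def _old_api(x):
#         warnings.warn("deprecated", UserWarning)
#         return x * 2
#
#     result = assert_warns(UserWarning, _old_api, 3)  # warning class checked
#     assert result == 6                               # return value passed through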
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check the message of each warning that belongs to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we can depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
    exceptions : exception or tuple of exception
        The expected exception type(s) that `function` should raise.
    message : str
        Substring expected to be found in the error message.
    function : callable
        Callable object expected to raise the error.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
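# Illustrative usage sketch (not part of the original module): the `_divide`
# helper below is hypothetical and only shows how assert_raise_message checks
# both the exception type and the message substring.
def _example_assert_raise_message():  # pragma: no cover - documentation only
    def _divide(a, b):
        if b == 0:
            raise ValueError("division by zero is not allowed")
        return a / b
    # Passes silently: ValueError is raised and its message contains the text.
    assert_raise_message(ValueError, "division by zero", _divide, 1, 0)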
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
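# Illustrative usage sketch (not part of the original module): write a fake
# mldata-style .mat payload into an in-memory buffer, with columns transposed
# the way fetch_mldata expects them.
def _example_fake_mldata():  # pragma: no cover - documentation only
    from io import BytesIO
    buf = BytesIO()
    fake_mldata({'data': np.arange(6).reshape(2, 3), 'label': np.arange(2)},
                dataname='fake', matfile=buf, ordering=['label', 'data'])
    buf.seek(0)  # buf now holds a .mat file readable by scipy.io.loadmat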
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
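# Illustrative usage sketch (not part of the original module): install the
# mldata mock for the duration of a test and restore the real urlopen after.
def _example_mldata_mock():  # pragma: no cover - documentation only
    install_mldata_mock({'fake': {'data': np.zeros((3, 4)),
                                  'label': np.arange(3)}})
    try:
        pass  # code under test would call sklearn.datasets.fetch_mldata here
    finally:
        uninstall_mldata_mock()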
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
        Whether to include "special" estimators, such as label processors and
        test helpers, that are excluded from testing by default.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
        if not hasattr(c, '__abstractmethods__'):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
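# Illustrative usage sketch (not part of the original module): crawl sklearn
# for every classifier class and print its name.
def _example_all_estimators():  # pragma: no cover - documentation only
    for name, Estimator in all_estimators(type_filter='classifier'):
        print(name, Estimator)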
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
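# Illustrative usage sketch (not part of the original module): a plotting test
# that is skipped automatically when matplotlib is not installed.
def _example_if_matplotlib():  # pragma: no cover - documentation only
    @if_matplotlib
    def test_plot_something():
        import matplotlib.pyplot as plt
        plt.plot([0, 1], [0, 1])
    return test_plot_something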
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
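# Illustrative usage sketch (not part of the original module): expose an array
# to the code under test as a read-only memory map backed by a temporary folder.
def _example_temp_memmap():  # pragma: no cover - documentation only
    X = np.arange(10, dtype=np.float64)
    with TempMemmap(X) as X_readonly:
        assert X_readonly.sum() == X.sum()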
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Tensorflow/source/numpy/fft/fftpack.py
|
31
|
46059
|
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
from .helper import _FFTCache
_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
# We have to ensure that only a single thread can access a wsave array
# at any given time. Thus we remove it from the cache and insert it
# again after it has been used. Multiple threads might create multiple
# copies of the wsave array. This is intentional and a limitation of
# the current C code.
wsave = fft_cache.pop_twiddle_factors(n)
if wsave is None:
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache.put_twiddle_factors(n, wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a).astype(complex, copy=False)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
if n is None:
n = a.shape[axis]
output *= 1 / sqrt(n)
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
spectrum.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
padded with zeros. If `n` is not given, it is determined from the
length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*m - 2`` where ``m`` is the length of the transformed axis of
the input. To get an odd number of output points, `n` must be
        specified, for instance as ``2*m - 1`` in the typical case.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd.
    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
.. versionadded:: 1.10.0
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
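# Illustrative sketch (not part of the original module): how _cook_nd_args
# resolves its defaults. With neither `s` nor `axes` given, every axis is
# transformed at its current length; with invreal=1 the last entry of `s` is
# expanded to 2*(n-1) so irfftn can recover the original real-input length.
def _example_cook_nd_args():  # pragma: no cover - documentation only
    a = zeros((4, 5))
    s, axes = _cook_nd_args(a)
    # here s == [4, 5] and axes == [-2, -1]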
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
|
mit
|
ephes/scikit-learn
|
examples/preprocessing/plot_robust_scaling.py
|
221
|
2702
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # reuse the scaling fitted on the training data
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
|
bsd-3-clause
|
rexshihaoren/scikit-learn
|
examples/missing_values.py
|
233
|
3056
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
|
bsd-3-clause
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/IPython/core/tests/refbug.py
|
4
|
1505
|
"""Minimal script to reproduce our nasty reference counting bug.
The problem is related to https://github.com/ipython/ipython/issues/141
The original fix for that appeared to work, but John D. Hunter found a
matplotlib example which, when run twice in a row, would break. The problem
were references held by open figures to internals of Tkinter.
This code reproduces the problem that John saw, without matplotlib.
This script is meant to be called by other parts of the test suite that call it
via %run as if it were executed interactively by the user. As of 2011-05-29,
test_run.py calls it.
"""
#-----------------------------------------------------------------------------
# Module imports
#-----------------------------------------------------------------------------
import sys
from IPython import get_ipython
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# This needs to be here because nose and other test runners will import
# this module. Importing this module has potential side effects that we
# want to prevent.
if __name__ == '__main__':
ip = get_ipython()
if '_refbug_cache' not in ip.user_ns:
ip.user_ns['_refbug_cache'] = []
aglobal = 'Hello'
def f():
return aglobal
cache = ip.user_ns['_refbug_cache']
cache.append(f)
def call_f():
for func in cache:
print('lowercased:',func().lower())
|
bsd-2-clause
|
LiaoPan/scikit-learn
|
sklearn/datasets/lfw.py
|
38
|
19042
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file paths to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid picking up statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid picking up statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
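# Hedged usage sketch (not part of the original module): how a caller might load
# both variants. Assumes the LFW archives are already cached in the scikit-learn
# data home, or that downloading them (~200MB) is acceptable.
if __name__ == '__main__':
    people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    print("people images:", people.images.shape, "classes:", len(people.target_names))
    pairs = fetch_lfw_pairs(subset='train', resize=0.4)
    print("pairs array:", pairs.pairs.shape, "targets:", pairs.target.shape)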
|
bsd-3-clause
|
HaydenFaulkner/phd
|
keras_code/train.py
|
1
|
8706
|
'''
train.py -- used for training a model
'''
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = dir_path[:dir_path.find('/phd')+4]
if not dir_path in sys.path:
sys.path.append(dir_path)
print(sys.path)
import argparse
import datetime
import math
import time
import numpy as np
from keras_code import models
from keras import backend as K
import utilities.paths as paths
from utilities.logging import print_n_log, refresh_log
DRIVE = paths.get_drive()
# if not paths.is_cluster():
# import matplotlib.pyplot as plt
def train(model_id, identifier, model_path, nb_epoch=20, batch_size=16, load_epoch=None):
# set the channel order correctly
if K.backend() == 'theano':
K.set_image_dim_ordering('th')
K.set_image_data_format('channels_first')
else:
K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
training_start_time = time.clock()
t_l = [[], []]
v_l = [[], []]
v_a = [[], []]
model_path = model_path + model_id + '_' + identifier
# Load log
if not os.path.exists(model_path):
os.makedirs(model_path)
log = open(model_path + '/log.txt', "a")
print_n_log(log, '\n\n\nTraining initialised: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
# Check load epoch; if not specified or less than 0, get the latest saved epoch
if (load_epoch is None) or (load_epoch < 0):
load_epoch = 0
for i in range(100,-1,-1):
if os.path.isfile(model_path + '/' + model_id + '_' + identifier + '-e' + str(i) + '.h5'):
load_epoch = i
break
if load_epoch == 0:
print_n_log(log, '\nTraining model from scratch...\n\n')
model, output_classes = models.get_model_from_id(model_id, identifier, batch_size)
else:
print_n_log(log, '\nLoading past model to train from:\n')
print_n_log(log, '\n' + model_path + '/' + model_id + '_' + identifier + '-e' + str(load_epoch) + '.h5\n\n')
[t_l, v_l, v_a] = np.load(model_path + '/training_stats-e'+str(load_epoch)+'.npy')
model, output_classes = models.get_model_from_id(model_id, identifier, batch_size, load_epoch=load_epoch)
assert (model is not None), "model is None"
# Compile the model
model = models.compile_model(model_id, model)
# Load the dataset (train and val)
dataset = models.get_dataset(model_id, 'train', force_noneq=False, batch_size=batch_size)
dataset_val = models.get_dataset(model_id, 'val', force_noneq=False, batch_size=batch_size)
class_weights = dataset.get_class_weights(type='balanced')
dataset.get_dataset_statistics(log)
dataset_val.get_dataset_statistics(log)
# dataset.set_batch_size(batch_size)
fig = None
for e in range(load_epoch + 1, nb_epoch + 1):
# refresh log every epoch
log = refresh_log(log, model_path)
print_n_log(log, "\n\n--------------------------------------------\nepoch %d\n--------------------------------------------\n" % e)
# Reset and Randomise the dataset per epoch
dataset.reset()
dataset.randomise()
past = 0
epoch_start_time = time.clock()
# BATCH LOOP
num_of_samples = dataset.number_of_samples()
num_of_batches = int(math.floor(num_of_samples/float(batch_size)))
sample_count = 0
sum_loss = 0
sum_count = 0
c = 0
for batch_count in range(0, num_of_batches):
c += 1
# Get data per batch
x, y, sid = dataset.get_batch_xy(True)
sample_count += len(y)
# print(c)
loss, acc = model.train_on_batch(x, y, class_weight=class_weights)
# loss, acc = model.train_on_batch(x, y)
# Sums since last print
sum_loss += loss
sum_count += 1
if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches-1):
etr = (num_of_samples - sample_count) * ((time.clock() - epoch_start_time) / float(sample_count))
ttr = ((nb_epoch - e + 1) * num_of_samples - sample_count) / (((e-1) * num_of_samples + sample_count) / (time.clock() - training_start_time))
log = refresh_log(log, model_path)
print_n_log(log, "\n%d .. [loss: %.5f] Batch: %d / %d; Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;" % (
past, sum_loss / sum_count, batch_count, num_of_batches, sample_count, num_of_samples, int((etr / 60) / 60), int((etr / 60) % 60),
int(etr % 60),
int((ttr / 60) / 60), int((ttr / 60) % 60), int(ttr % 60)))
t_l[0].append((e - 1) + past * .01)
t_l[1].append(sum_loss / sum_count)
# graph it
# if not paths.is_cluster():
# if fig:
# plt.close()
# fig, ax1 = plt.subplots()
# ax1.plot(t_l[0], t_l[1], 'g-')
# ax1.plot(v_l[0], v_l[1], 'b-')
# ax1.set_ylim(bottom=0)
# ax2 = ax1.twinx()
# ax2.plot(v_a[0], v_a[1], 'r-')
# ax2.set_ylim(top=1)
past += 10
sum_loss = 0
sum_count = 0
# if past > 0:
# break
# Validation
print_n_log(log, '\n------------------------ Validation results ------------------------\n\n')
# reset and randomise validation dataset
dataset_val.reset()
dataset_val.randomise()
past = 0
val_metrics = []
num_of_samples = dataset_val.number_of_samples()
num_of_batches = int(math.floor(num_of_samples / float(batch_size)))
sample_count = 0
for batch_count in range(0, num_of_batches):
x, y, sid = dataset_val.get_batch_xy(True)
sample_count += len(y)
if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches - 1):
val_metrics.append(model.test_on_batch(x, y))
if int((float(sample_count) / num_of_samples) * 100) > past:
print_n_log(log, '.')
past += 10
val_results = np.average(val_metrics, axis=0)
print_n_log(log, '\n' + str(val_results))
v_l[0].append(e)
v_l[1].append(val_results[0])
v_a[0].append(e)
v_a[1].append(val_results[1])
# Save Figure
# fig.savefig(model_path + '/training.png')
# if not paths.is_cluster():
# fig.savefig(model_path + '/training.pdf')
# Save Model
model.save_weights(model_path + '/' + model_id + '_' + identifier + '-e' + str(e) + '_weights.h5', overwrite=True)
# if not paths.is_cluster():
# model.save(model_path + '/' + model_id + '_' + identifier + '-e' + str(e) + '.h5', overwrite=True)
# Save Training Stats
np.save(model_path + '/training_stats-e'+str(e)+'.npy', [t_l, v_l, v_a])
tt = time.clock() - training_start_time
print_n_log(log, '\n\nTotal Time Taken: %02d:%02d:%02d;\n' % (int((tt / 60) / 60), int((tt / 60) % 60), int(tt % 60)))
print_n_log(log, '\nTraining Finished\n')
log.close()
if __name__ == "__main__":
CLUSTER = paths.is_cluster() # AT END OF PROJECT MAYBE FIX SO
if CLUSTER:
p = argparse.ArgumentParser()
p.add_argument('model_id', help='The model ID MV..._01')
p.add_argument('identifier', help='The model identifier')
p.add_argument('model_path', help='The path the model save location')
p.add_argument('nb_epoch', type=int, default=20, help='The number of epochs (def: 20)')
p.add_argument('batch_size', type=int, default=16, help='The batch size (def: 16)')
p.add_argument('--load_epoch', help="load a particular saved epoch")
p = p.parse_args()
train(p.model_id, p.identifier, p.model_path, p.nb_epoch, p.batch_size, p.load_epoch)
else:
# model_id = 'MVK_57_34' #DONE
model_id = 'MVSK_10_07' #DONE
identifier = '00001'
model_path = paths.get_model_path('KERAS')
nb_epoch = 10
# nb_epoch = 5
batch_size = 16
load_epoch = None
# train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)
train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)
# model_id = 'MVK_57_33'
# train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)
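# Hedged note (hypothetical paths/IDs): on the cluster the same run is launched
# through the argparse interface defined above, e.g.
#   python train.py MVSK_10_07 00001 /path/to/models/ 20 16 --load_epoch 5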
|
mit
|
ssaeger/scikit-learn
|
examples/classification/plot_classification_probability.py
|
138
|
2871
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
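# Hedged extension (not part of the original example): score the predicted
# probabilities numerically with the log loss, in addition to the plots above.
from sklearn.metrics import log_loss
for name, classifier in classifiers.items():
    proba = classifier.predict_proba(X)  # probabilities on the training points
    print("log-loss for %s : %.3f" % (name, log_loss(y, proba)))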
|
bsd-3-clause
|
EUDAT-B2SHARE/invenio-old
|
modules/webstat/lib/webstat_engine.py
|
1
|
105689
|
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random, cgi
from operator import itemgetter
from invenio.config import CFG_TMPDIR, \
CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_BINDIR, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, \
CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL, \
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE, \
CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import CFG_JOURNAL_TAG
from invenio.urlutils import redirect_to_url
from invenio.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values, \
search_pattern
from invenio.search_engine_utils import get_fieldvalues
from invenio.dbquery import run_sql, \
wash_table_column_name
from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
from invenio.bibcirculation_utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.bibcirculation_dblayer import get_id_bibrec, \
get_borrower_data
from invenio.websearch_webcoll import CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE
from invenio.dateutils import convert_datetext_to_datestruct, convert_datestruct_to_dategui
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args, return_sql=False):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
if args.get('collection', 'All') == 'All':
sql_query_g = _get_sql_query("creation_date", args['granularity'],
"bibrec")
sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
return_sql=return_sql, sql_text=
"Previous count: %s<br />Current count: %%s" % (sql_query_i),
acumulative=True)
else:
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
g = get_keyevent_trend_new_records(args, return_sql, True)
sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
if return_sql:
return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
return _get_trend_from_actions(g, initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative=True)
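# Hedged usage sketch (not executed here; assumes a configured Invenio database):
# the key-event trend helpers in this section all take an ``args`` dict such as
#   args = {'collection': 'All', 't_start': '2013-01-01', 't_end': '2013-02-01',
#           'granularity': 'day', 't_format': '%Y-%m-%d'}
#   trend = get_keyevent_trend_new_records(args)
# where 't_format' is assumed to be a strftime-style pattern matching the
# 't_start'/'t_end' values.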
def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
"""
Returns the number of new records uploaded during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
"bibrec"),
return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
extra_select=", id", group_by=False, count=False)
if return_sql:
return sql % (lower, upper)
recs = run_sql(sql, (lower, upper))
if recs:
def add_count(i_list, element):
""" Reduce function to create a dictionary with the count of ids
for each date """
if i_list and element == i_list[-1][0]:
i_list[-1][1] += 1
else:
i_list.append([element, 1])
return i_list
action_dates = reduce(add_count,
map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
[])
else:
action_dates = []
if only_action:
return action_dates
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_frequency(args, return_sql=False):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query"),
return_sql=return_sql)
def get_keyevent_trend_comments_frequency(args, return_sql=False):
"""
Returns the number of comments (of any kind) posted
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT")
else:
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT", conditions=
_get_collection_recids_for_sql_query(args['collection']))
return _get_keyevent_trend(args, sql, return_sql=return_sql)
def get_keyevent_trend_search_type_distribution(args, return_sql=False):
"""
Returns the number of searches carried out during the given
timestamp range, but also partitions them by type: Simple and
Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine all simple searches:
simple = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%p=%%'")
# SQL to determine all advanced searches:
advanced = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%as=1%%'")
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, simple,
return_sql=return_sql, sql_text="Simple: %s")
a_trend = _get_keyevent_trend(args, advanced,
return_sql=return_sql, sql_text="Advanced: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, a_trend)
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_trend_download_frequency(args, return_sql=False):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# Collect list of timestamps of insertion in the specific collection
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("download_time",
args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
extra_select=", GROUP_CONCAT(id_bibrec)")
if return_sql:
return sql % (lower, upper)
action_dates = []
for result in run_sql(sql, (lower, upper)):
count = result[1]
for id in result[2].split(","):
if id == '' or not int(id) in ids:
count -= 1
action_dates.append((result[0], count))
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args, return_sql=False):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("loaned_on",
args["granularity"], "crcLOAN"), return_sql=return_sql)
def get_keyevent_trend_web_submissions(args, return_sql=False):
"""
Returns the quantity of websubmissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args['doctype'] == 'all':
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, return_sql=return_sql)
else:
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="doctype=%s AND action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
return_sql=return_sql)
return res
def get_keyevent_loan_statistics(args, return_sql=False):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)"
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
# Number of loans:
loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
# Only the CERN site wants the items of the collection "Books & Proceedings"
if CFG_CERN_SITE:
items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
if items_in_book_coll == "":
total_items_sql = 0
else:
total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
items_in_book_coll
else: # The rest take all the items
total_items_sql = "SELECT COUNT(*) FROM crcITEM"
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
if return_sql:
return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
loaned on total number of items</li><li>%s</li></ol>" % \
(loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
loans = run_sql(loans_sql, param)[0][0]
items_loaned = run_sql(items_loaned_sql, param)[0][0]
if total_items_sql:
total_items = run_sql(total_items_sql)[0][0]
else:
total_items = 0
if total_items == 0:
loaned_on_total = 0
never_loaned_on_total = 0
else:
# Number of items loaned on the total number of items:
loaned_on_total = float(items_loaned) * 100 / float(total_items)
# Number of items never loaned on the total number of items:
never_loaned_on_total = 100L - loaned_on_total
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = float(avg)
else:
avg = 0L
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
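# Hedged usage sketch (assumes a configured Invenio database; values are purely
# illustrative): a typical filter dict for the loan statistics above could be
#   args = {'t_start': '2013-01-01', 't_end': '2013-02-01', 't_format': '%Y-%m-%d',
#           'udc': '51', 'item_status': CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
#           'publication_date': '', 'creation_date': ''}
#   stats = get_keyevent_loan_statistics(args)
# where each entry of the returned tuple is itself a 1-tuple.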
def get_keyevent_loan_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loan']: minimum number of loans
@type args['min_loan']: int
@param args['max_loan']: maximum number of loans
@type args['max_loan']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_where = []
param = []
sql_from = ""
if 'udc' in args and args['udc'] != '':
sql_where.append("i." + _check_udc_value_where())
param.append(_get_udc_truncated(args['udc']))
if 'loan_period' in args and args['loan_period'] != '':
sql_where.append("loan_period = %s")
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)")
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br"
sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
param.append('%%%s%%' % args['creation_date'])
if sql_where:
sql_where = "WHERE %s AND" % " AND ".join(sql_where)
else:
sql_where = "WHERE"
param = tuple(param + [lower, upper])
# SQL for both queries
check_num_loans = "HAVING "
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "HAVING ":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
# Optimized to get all the data in only one query (not call get_fieldvalues several times)
mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
(sql_from, sql_where, check_num_loans)
limit_n = ""
if limit > 0:
limit_n = "LIMIT %d" % limit
nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
barcode NOT IN (SELECT id_bibrec FROM crcLOAN WHERE loaned_on > %%s AND \
loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
(sql_from, sql_where, limit_n)
items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
if return_sql:
return "Most loaned: %s<br \>Never loaned: %s" % \
(mldocs_sql % param, nldocs_sql % param)
mldocs = run_sql(mldocs_sql, param)
items = dict(run_sql(items_sql))
order_m = []
for mldoc in mldocs:
order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
float(mldoc[1]) / float(items[mldoc[0]])])
order_m = sorted(order_m, key=itemgetter(3))
order_m.reverse()
# Check limit values
if limit > 0:
order_m = order_m[:limit]
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
for mldoc in order_m:
res.append(("Most loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
mldoc[1], mldoc[2],
_check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
nldocs = run_sql(nldocs_sql, param)
for nldoc in nldocs:
res.append(("Not loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
0, items[nldoc[0]],
_check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
# nldocs = run_sql(nldocs_sql, param_n)
return (res)
def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most renewed items stored by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
% (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql(sql, param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args, return_sql=False):
"""
Data:
- Number of overdue returns in a timespan
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
if return_sql:
return sql % (lower, upper)
return ((run_sql(sql, (lower, upper))[0][0], ), )
def get_keyevent_trend_returns_percentage(args, return_sql=False):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine overdue returns:
overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL \
AND (returned_on IS NULL OR returned_on > due_date)",
dates_range_param="loaned_on")
# SQL to determine all returns:
total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL",
dates_range_param="loaned_on")
# Compute the trend for both types
o_trend = _get_keyevent_trend(args, overdue,
return_sql=return_sql, sql_text="Overdue: %s")
t_trend = _get_keyevent_trend(args, total,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (o_trend, t_trend)
return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
for i in range(len(o_trend))]
def get_keyevent_ill_requests_statistics(args, return_sql=False):
"""
Data:
- Number of ILL requests
- Number of satisfied ILL requests 2 weeks after the date of request
creation on a timespan
- Average time between the date and the hour of the ill request
date and the date and the hour of the delivery item to the user
on a timespan
- Average time between the date and the hour the ILL request
was sent to the supplier and the date and hour of the
delivery item on a timespan
Filter by
- in a specified time span
- by type of document (book or article)
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
satrequests_sql = "SELECT COUNT(*) %s %s \
AND arrival_date IS NOT NULL AND \
DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
AND arrival_date IS NOT NULL" % (sql_from, sql_where)
avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
AND arrival_date IS NOT NULL \
AND request_date IS NOT NULL" % (sql_from, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(requests_sql % param, satrequests_sql % param,
avgdel_sql % param, avgsup_sql % param)
# Number of requests:
requests = run_sql(requests_sql, param)[0][0]
# Number of satisfied ILL requests 2 weeks after the date of request creation:
satrequests = run_sql(satrequests_sql, param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql(avgdel_sql, param)[0][0]
if avgdel:
avgdel = float(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql(avgsup_sql, param)[0][0]
if avgsup:
avgsup = float(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
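# Illustrative usage sketch (not executed here; the dates are made up): with only
# the mandatory keys set, e.g.
#   get_keyevent_ill_requests_statistics({'t_start': '2012-01-01',
#                                         't_end': '2012-12-31',
#                                         't_format': '%Y-%m-%d'})
# the four queries above are run against the live tables and the result is a
# tuple of four 1-tuples: ((requests, ), (satrequests, ), (avgdel, ), (avgsup, )).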
def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE status != '%s' AND request_date > %%s AND request_date < %%s " \
% CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s "
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Id", "Title", "Author", "Edition")]
for req_id, item_info in run_sql(sql, param):
item_info = eval(item_info)
try:
res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
"""
Returns the number of satisfied ILL requests 2 weeks after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
sql_from = "crcILLREQUEST ill "
sql_where = ""
param = []
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() AND \
(arrival_date IS NULL OR arrival_date < ADDDATE(request_date, 14)) " + sql_where)
# SQL to determine all ILL requests:
total = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
return_sql=return_sql, sql_text="Satisfied: %s")
t_trend = _get_keyevent_trend(args, total, extra_param=param,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, t_trend)
return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args, return_sql=False):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
# Number of new items:
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
if return_sql:
return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
def get_keyevent_items_lists(args, return_sql=False, limit=50):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
    @type args['library']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
if len(param) == 0:
sqlres = run_sql(sql)
else:
sqlres = run_sql(sql, tuple(param))
sql = sql % param
if return_sql:
return sql
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args, return_sql=False):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
param = tuple(param)
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
# Number of successful hold requests transactions
succesful_holds = "SELECT COUNT(*) %s %s AND lr.status='%s'" % (sql_from, sql_where,
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE)
# Average time between the hold request date and the date of delivery document in a year
avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(holds % param, succesful_holds % param, avg_sql % param)
avg = run_sql(avg_sql, param)[0][0]
    if avg:
avg = int(avg)
else:
avg = 0
return ((run_sql(holds, param)[0][0], ),
(run_sql(succesful_holds, param)[0][0], ), (avg, ))
def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT lr.barcode %s %s GROUP BY barcode \
ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
if return_sql:
return sql
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql(sql, param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args, return_sql=False):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
param = (lower, upper, lower, upper)
# Total number of active users:
users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
UNION (SELECT id_crcBORROWER user %s %s)) res" % \
(sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
if return_sql:
return users % param
return ((run_sql(users, param)[0][0], ), )
def get_keyevent_user_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = (lower, upper, lower, upper)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
%s" % (limit)
if return_sql:
return sql % param
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql(sql, param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
return _run_cmd('uptime').strip().replace(' ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
    @return: The number of Apache processes (root + children), as reported by 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals = run_sql("SELECT COUNT(*), \
SUM(number_of_renewals) \
FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
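# Illustrative sketch: get_keyevent_bibcirculation_report('monthly') counts the
# circulation transactions since the first day of the current month, while the
# default freq='yearly' starts the counters on January 1st; either way a 5-tuple
# (loans, renewals, returns, illrequests, holdrequest) is returned.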
def get_last_updates():
"""
    List the date/time when the last updates were done (easy-to-read format).
    @return: last indexing, last ranking, last sorting, last webcoll run
@type: (datetime, datetime, datetime, datetime)
"""
try:
last_index = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM idxINDEX WHERE \
name="global"')[0][0])))
last_rank = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM rnkMETHOD ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
last_sort = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM bsrMETHODDATA ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(file_coll_last_update.read())))
file_coll_last_update.close()
# database not filled
except IndexError:
return ("", "", "", "")
return (last_index, last_rank, last_sort, last_coll)
def get_list_link(process, category=None):
"""
Builds the link for the list of records not indexed, ranked, sorted or
collected.
@param process: kind of process the records are waiting for (index, rank,
sort, collect)
@type process: str
@param category: specific sub-category of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount)
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type category: str
@return: link text
@type: string
"""
if process == "index":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name=%s)', (category,))
elif process == "rank":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
rnkMETHOD WHERE name=%s)', (category,))
elif process == "sort":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
bsrMETHODDATA WHERE id_bsrMETHOD=(SELECT id \
FROM bsrMETHOD WHERE name=%s))', (category,))
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
coll_last_update = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > %s', (coll_last_update,))
# build the link
if list_registers == ():
return "Up to date"
link = '<a href="' + CFG_SITE_URL + '/search?p='
for register in list_registers:
link += 'recid%3A' + str(register[0]) + '+or+'
# delete the last '+or+'
link = link[:len(link)-4]
link += '">' + str(len(list_registers)) + '</a>'
return link
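# Illustrative sketch (record ids are made up): if three records with ids 10, 11
# and 12 are still waiting for the global index, get_list_link("index", "global")
# returns a link of the form
#   <a href=".../search?p=recid%3A10+or+recid%3A11+or+recid%3A12">3</a>
# whereas an empty result set yields the plain string "Up to date".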
def get_search_link(record_id):
"""
    Auxiliary function that builds the direct link for a given record.
@param record_id: record's id number
@type record_id: int
@return: link text
@type: string
"""
link = '<a href="' + CFG_SITE_URL + '/record/' + \
str(record_id) + '">Record [' + str(record_id) + ']</a>'
return link
def get_ingestion_matching_records(request=None, limit=25):
"""
Fetches all the records matching a given pattern, arranges them by last
    modification date and returns a list.
@param request: requested pattern to match
@type request: str
@return: list of records matching a pattern,
(0,) if no request,
(-1,) if the request was invalid
@type: list
"""
    if request is None or request == "":
return (0,)
try:
records = list(search_pattern(p=request))
except:
return (-1,)
if records == []:
return records
# order by most recent modification date
query = 'SELECT id FROM bibrec WHERE '
for r in records:
query += 'id="' + str(r) + '" OR '
query = query[:len(query)-4]
query += ' ORDER BY modification_date DESC LIMIT %s'
list_records = run_sql(query, (limit,))
final_list = []
for lr in list_records:
final_list.append(lr[0])
return final_list
def get_record_ingestion_status(record_id):
"""
    Returns the number of ingestion methods not yet applied to a given record.
If 0, the record is up to date.
@param record_id: record id number
@type record_id: int
@return: number of methods not updated for the record
@type: int
"""
counter = 0
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")', (record_id, ))[0][0]
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
rnkMETHOD ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
    counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
bsrMETHODDATA ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = file_coll_last_update.read()
file_coll_last_update.close()
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND \
modification_date >\
%s', (record_id, last_coll,))[0][0]
return counter
def get_specific_ingestion_status(record_id, process, method=None):
"""
Returns whether a record is or not up to date for a given
process and method.
@param record_id: identification number of the record
@type record_id: int
@param process: kind of process the records may be waiting for (index,
rank, sort, collect)
@type process: str
@param method: specific sub-method of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
    @type method: str
    @return: None if the record is up to date, otherwise the date/time the
             method was last updated (i.e. the record is still waiting)
@type: date/time string
"""
exist = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s', (record_id, ))
if exist[0][0] == 0:
return "REG not in DB"
if process == "index":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM idxINDEX WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM idxINDEX WHERE \
name=%s', (method,))[0][0]
elif process == "rank":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM rnkMETHOD WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM rnkMETHOD WHERE \
name=%s', (method,))[0][0]
elif process == "sort":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s))', (record_id, method,))
last_time = run_sql ('SELECT last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s)', (method,))[0][0]
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_time = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s \
AND modification_date > %s',
(record_id, last_time,))
# no results means the register is up to date
if list_registers[0][0] == 0:
return None
else:
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(last_time)))
def get_title_ingestion(record_id, last_modification):
"""
    Auxiliary function that builds a direct link for a given record, with its last
modification date.
@param record_id: id number of the record
@type record_id: string
@param last_modification: date/time of the last modification
@type last_modification: string
@return: link text
@type: string
"""
return '<h3><a href="%s/record/%s">Record [%s] last modification: %s</a></h3>' \
% (CFG_SITE_URL, record_id, record_id, last_modification)
def get_record_last_modification (record_id):
"""
Returns the date/time of the last modification made to a given record.
@param record_id: id number of the record
@type record_id: int
@return: date/time of the last modification
@type: string
"""
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT modification_date FROM bibrec \
WHERE id=%s', (record_id,))[0][0])))
def get_general_status():
"""
    Returns an approximate number of ingestion processes not yet applied to new
    or updated records, using the "global" category.
@return: number of processes not updated
@type: int
"""
return run_sql('SELECT COUNT(*) FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")')[0][0]
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and their content to be included;
                         if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
where = []
sql_param = [lower, upper]
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "" or not where:
where.append(wash_table_column_name(col_title))
elif col_bool == "and":
where.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
where.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
where.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
where.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
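# Illustrative sketch of the expected argument shape; the event id mirrors the
# one used by get_basket_views() further below, while the column name and value
# are hypothetical.  Each entry of args['cols'] is a (bool, title, content)
# triple where bool is one of "", "and", "or", "and_not":
#   args = {'event_id': 'baskets', 't_start': '2012-01-01', 't_end': '2012-02-01',
#           'granularity': 'day', 't_format': '%Y-%m-%d',
#           'cols': [("", "action", "display")]}
# Column titles that do not exist in the event table are silently skipped.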
def get_customevent_dump(args):
"""
    Similar to a get_event_trend implementation, but no refining (i.e. frequency
    handling) is carried out whatsoever. This is just a raw dump.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and their content to be included;
                         if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
    Helper function that, for a certain event id, retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
    Helper function that, for a certain event id, retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = CFG_JOURNAL_TAG.replace("%", "p")
# First get records of the year
recids = perform_request_search(p=query, of="id", wl=0)
# Then return list by tag
pub = get_most_popular_field_values(recids, tag)
if len(pub) == 0:
return []
if CFG_CERN_SITE:
total = sum([x[1] for x in pub])
else:
others = 0
total = 0
first_other = -1
for elem in pub:
total += elem[1]
if elem[1] < 2:
if first_other == -1:
first_other = pub.index(elem)
others += elem[1]
del pub[first_other:]
if others != 0:
pub.append(('Others', others))
pub.append(('TOTAL', total))
return pub
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
return
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = len(labels)
width = 6 + float(numb_elem) / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
for i in range(numb_elem):
col = 0.5 + float(i) / (float(numb_elem) * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if not trend or len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
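# Illustrative sketch of a settings dictionary (the label texts are made up);
# these are the keys read by the rendering functions that follow:
#   settings = {'format': 'asciiart', 'multiple': None, 'title': 'Downloads',
#               'xlabel': 'Date', 'ylabel': 'Count', 'xtic_format': '%Y-%m-%d'}
# When path is empty the format is forced to 'asciiart' and the graph is printed
# to standard output instead of being written to a file.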
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data steps')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
data.append([rows, trend[-1][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
data.append(trend[-1][1])
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
size = settings.get("size", "500,400").split(",")
title = cgi.escape(settings["title"].replace(" ", "")[:10])
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/js/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
'<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheigth)dpx"></div>' +
'<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate%(title)s(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime() - (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData%(title)s() {""" % \
{'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
'hheigth': int(size[1]) / 2, 'title': title}
if(len(trend) > 1):
granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
_to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
else:
granularity_td = datetime.timedelta()
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += ", [parseDate%s('%s'), %d]];\n" % (title,
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S')+ granularity_td,
trend[-1][1][col])
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """, [parseDate%s("%s"), %d]];
return [d1];
}
""" % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
granularity_td, trend[-1][1])
# Set options
tics = """yaxis: {
tickDecimals : 0
},"""
if settings["xtic_format"] != '':
current = _to_datetime(maxx, '%Y-%m-%d %H:%M:%S')
next = current + granularity_td
if (granularity_td.seconds + granularity_td.days * 24 * 3600) > 2592000:
next = current.replace(day=31)
tics += 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
% (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title, next)
out += """var options%s ={
series: {
lines: { steps: true, fill: true},
points: { show: false }
},
legend: {show: false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % (title, tics, )
# Write the plot method in javascript
out += """var startData%(title)s = getData%(title)s();
var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
// setup overview
var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
legend: { show: true, container: $("#overviewLegend%(title)s") },
series: {
lines: { steps: true, fill: true, lineWidth: 1},
shadowSize: 0
},
%(tics)s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % {"title": title, "tics": tics}
# Tooltip and zoom
out += """
function showTooltip%(title)s(x, y, contents) {
$('<div id="tooltip%(title)s">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint%(title)s = null;
$("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint%(title)s != item.datapoint) {
previousPoint%(title)s = item.datapoint;
$("#tooltip%(title)s").remove();
var y = item.datapoint[1];
showTooltip%(title)s(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip%(title)s").remove();
previousPoint%(title)s = null;
}
});
$("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
if (item) {
plot%(title)s.highlight(item.series, item.datapoint);
}
});
// now connect the two
$("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot%(title)s = $.plot($("#placeholder%(title)s"), getData%(title)s(ranges.xaxis.from, ranges.xaxis.to),
$.extend(true, {}, options%(title)s, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview%(title)s.setSelection(ranges, true);
});
$("#overview%(title)s").bind("plotselected", function (event, ranges) {
plot%(title)s.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>""" % {'title' : title}
open(path, 'w').write(out)
def get_numeric_stats(data, multiple):
""" Returns average, max and min values for data """
data = [x[1] for x in data]
if data == []:
return (0, 0, 0)
if multiple:
lists = []
for i in range(len(data[0])):
lists.append([x[i] for x in data])
return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
[min(x) for x in lists])
else:
return (float(sum(data)) / len(data), max(data), min(data))
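# Minimal sketch of get_numeric_stats() behaviour (safe to call, it only does
# arithmetic on the supplied list; the labels are made up):
def _example_numeric_stats():
    single = get_numeric_stats([('d1', 1), ('d2', 3), ('d3', 2)], False)
    # single == (2.0, 3, 1): average, maximum and minimum of the values
    multi = get_numeric_stats([('d1', (1, 2)), ('d2', (3, 4))], True)
    # multi == ([2.0, 3.0], [3, 4], [1, 2]): the same, computed per data series
    return single, multi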
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
def create_graph_dump(dump, path):
"""
    Creates a plain-text table representation of the dump data produced by
    get_customevent_dump.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
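# Illustrative sketch (date made up): get_args(datetime.date(2012, 3, 1)) returns
#   {'t_start': '2012-03-01', 't_end': '2012-03-02',
#    'granularity': 'day', 't_format': '%Y-%m-%d'}
# i.e. a one-day window suitable for the daily SLS export helpers above.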
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
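# Illustrative sketch (values made up): a data list such as
#   [('2012-01-01', (3, 5)), ('2012-01-02', 7)]
# is serialised to the CSV lines
#   "2012-01-01",3,5
#   "2012-01-02",7
# before being handed to _export() with the text/csv MIME type.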
def export_to_file(data, req):
"""
Exports the data to a file.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
try:
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
except ImportError:
csv_list = []
for row in data:
row = ['"%s"' % str(col) for col in row]
csv_list.append(",".join(row))
_export('text/csv', '\n'.join(csv_list), req)
# INTERNAL
def _export(mime, content, req):
"""
Helper function to pass on the export call. Create a
temporary file in which the content is stored, then let
redirect to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format, acumulative=False):
"""
Given a list of dates reflecting some sort of action/event, and some additional parameters,
    an internal data format is returned. Setting 'initial_value' to zero means that the
    frequency is counted per time span rather than accumulated across spans.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in dt_format
@type t_start: str
@param t_end: End time for the time domain in dt_format
@type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
    @param dt_format: Format of the 't_start' and 't_end' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Work on a mutable copy of the action dates; a (datetime.max, 0) sentinel
    # is substituted once the list is exhausted
action_dates = list(action_dates)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
vector = [(None, initial_value)]
try:
upcoming_action = action_dates.pop()
#Do not count null values (when year, month or day is 0)
if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
# Create an iterator running from the first day of activity
for current in _get_datetime_iter(t_start, granularity, dt_format):
        # Counter of action_dates in the current span; start from the previous
        # value when accumulating, otherwise from zero.
if acumulative:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
if upcoming_action[0] == {"year": current.year,
"month": current.month,
"day": current.day,
"hour": current.hour,
"minute": current.minute,
"second": current.second
}[granularity]:
actions_here += upcoming_action[1]
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
# Make sure to stop the iteration at the end time
if {"year": current.year >= stop_at.year,
"month": current.month >= stop_at.month and current.year == stop_at.year,
"day": current.day >= stop_at.day and current.month == stop_at.month,
"hour": current.hour >= stop_at.hour and current.day == stop_at.day,
"minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
"second": current.second >= stop_at.second and current.minute == stop_at.minute
}[granularity]:
break
# Remove the first bogus tuple, and return
return vector[1:]
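# Illustrative sketch of the data flow (values made up): with day granularity the
# SQL built by _get_sql_query() returns (day-of-month, count) rows, newest first,
# e.g. [(3, 7), (1, 5)].  Feeding them in:
#   _get_trend_from_actions([(3, 7), (1, 5)], 0,
#                           '2012-01-01', '2012-01-04', 'day', '%Y-%m-%d')
# yields [('2012-01-01 00:00:00', 5), ('2012-01-02 00:00:00', 0),
#         ('2012-01-03 00:00:00', 7)].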
def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
return_sql=False, sql_text='%s', acumulative=False):
"""
Returns the trend for the sql passed in the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = tuple([lower, upper] + extra_param)
if return_sql:
sql = sql % param
return sql_text % sql
return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative)
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' parameter
@type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
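# Minimal sketch of the iterator (safe to call, no database access; the starting
# date is made up): step through the first three points at day granularity.
def _example_datetime_iter():
    from itertools import islice
    return list(islice(_get_datetime_iter('2012-01-30 00:00:00'), 3))
    # -> [datetime.datetime(2012, 1, 30, 0, 0), datetime.datetime(2012, 1, 31, 0, 0),
    #     datetime.datetime(2012, 2, 1, 0, 0)]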
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
not found a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [(CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, "Cancelled"),
(CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, "Claimed"),
(CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, "In process"),
(CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, "Not arrived"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, "On loan"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, "On order"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, "On shelf")] + \
[(status, status) for status in CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
        if not CFG_CERN_SITE or "CERN" not in dat[0]:  # do not add internal libraries for CERN site
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
if res:
return res[0][0]
return ''
def _get_collection_recids_for_sql_query(coll):
ids = get_collection_reclist(coll).tolist()
if len(ids) == 0:
return ""
return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
def _check_udc_value_where():
return "id_bibrec IN (SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s) "
def _get_udc_truncated(udc):
if udc[-1] == '*':
return "%s%%" % udc[:-1]
if udc[0] == '*':
return "%%%s" % udc[1:]
return "%s" % udc
def _check_empty_value(value):
if len(value) == 0:
return ""
else:
return value[0][0]
def _get_granularity_sql_functions(granularity):
try:
return {
"year": ("YEAR",),
"month": ("YEAR", "MONTH",),
"day": ("MONTH", "DAY",),
"hour": ("DAY", "HOUR",),
"minute": ("HOUR", "MINUTE",),
"second": ("MINUTE", "SECOND")
}[granularity]
except KeyError:
return ("MONTH", "DAY",)
def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
extra_select="", dates_range_param="", group_by=True, count=True):
if len(dates_range_param) == 0:
dates_range_param = creation_time_name
conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
len(conditions) > 0 and "AND %s" % conditions or "")
values = {'creation_time_name': creation_time_name,
'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
'count': count and ", COUNT(*)" or "",
'tables_from': tables_from,
'conditions': conditions,
'extra_select': extra_select,
'group_by': ""}
if group_by:
values['group_by'] = "GROUP BY "
for fun in _get_granularity_sql_functions(granularity):
values['group_by'] += "%s(%s), " % (fun, creation_time_name)
values['group_by'] = values['group_by'][:-2]
return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
FROM %(tables_from)s WHERE %(conditions)s \
%(group_by)s \
ORDER BY %(creation_time_name)s DESC" % values
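# Editor's illustrative sketch (hypothetical table name, not from this module):
#     _get_sql_query("creation_date", "day", "some_table")
# would produce, roughly:
#     SELECT DAY(creation_date) , COUNT(*)  FROM some_table
#     WHERE creation_date > %s AND creation_date < %s
#     GROUP BY MONTH(creation_date), DAY(creation_date)
#     ORDER BY creation_date DESC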
|
gpl-2.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/tests/test_docstring_parameters.py
|
3
|
5239
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
import inspect
import sys
import warnings
import importlib
from pkgutil import walk_packages
from inspect import getsource
import sklearn
from sklearn.base import signature
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_docstring_parameters
from sklearn.utils.testing import _get_func_name
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.deprecation import _is_deprecated
PUBLIC_MODULES = set(['sklearn.' + modname
for _, modname, _ in walk_packages(sklearn.__path__)
if not modname.startswith('_') and
'.tests.' not in modname])
# TODO Uncomment all modules and fix doc inconsistencies everywhere
# The list of modules that are not tested for now
PUBLIC_MODULES -= set([
'sklearn.ensemble',
'sklearn.feature_selection',
'sklearn.kernel_approximation',
'sklearn.model_selection',
'sklearn.multioutput',
'sklearn.random_projection',
'sklearn.setup',
'sklearn.svm',
'sklearn.utils',
# Deprecated modules
'sklearn.cross_validation',
'sklearn.grid_search',
'sklearn.learning_curve',
])
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
'sklearn.utils.deprecation.load_mlcomp',
'sklearn.pipeline.make_pipeline',
'sklearn.pipeline.make_union',
'sklearn.utils.extmath.safe_sparse_dot',
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
'fit',
'score',
'fit_predict',
'fit_transform',
'partial_fit',
'predict'
]
def test_docstring_parameters():
raise SkipTest('Not testing docstring parameter consistency')
# Test module docstring formatting
# Skip test if numpydoc is not found or if python version is < 3.5
try:
import numpydoc # noqa
assert sys.version_info >= (3, 5)
except (ImportError, AssertionError):
raise SkipTest("numpydoc is required to test the docstrings, "
"as well as python version >= 3.5")
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES:
continue
if cname.startswith('_'):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
cls_init = getattr(cls, '__init__', None)
if _is_deprecated(cls_init):
continue
elif cls_init is not None:
this_incorrect += check_docstring_parameters(
cls.__init__, cdoc, class_name=cname)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if ('y' in sig.parameters and
sig.parameters['y'].default is None):
param_ignore = ['y'] # ignore y for fit and score
result = check_docstring_parameters(
method, ignore=param_ignore, class_name=cname)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith('_'):
continue
name_ = _get_func_name(func)
if (not any(d in name_ for d in _DOCSTRING_IGNORES) and
not _is_deprecated(func)):
incorrect += check_docstring_parameters(func)
msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
if len(incorrect) > 0:
raise AssertionError(msg)
@ignore_warnings(category=DeprecationWarning)
def test_tabs():
# Test that there are no tabs in our source files
for importer, modname, ispkg in walk_packages(sklearn.__path__,
prefix='sklearn.'):
# because we don't import
mod = importlib.import_module(modname)
try:
source = getsource(mod)
except IOError: # user probably should have run "make clean"
continue
        assert '\t' not in source, ('"%s" has tabs, please remove them '
                                    'or add it to the ignore list'
                                    % modname)
|
mit
|
sfxfactor/StellarNumericalProj
|
plots.py
|
1
|
2582
|
import numpy as np
import matplotlib.pyplot as plt
import polytrope as poly
import scipy.interpolate as intp
from astropy.io import fits
import sys
from scipy.optimize import minimize
#argument to plots.py is the index of the polytrope
n = float(sys.argv[1])
filename='poly'+str(n)
#if the fits file exists, use it. If not, integrate to get the data
try:
data = fits.getdata(filename+'.fits')
except IOError:
data = poly.poly(n,0.0001,0.000001,filename)
#split out important columns of data
z=data[0]
th=data[1]
mz2dthdz=data[2]
m3ozdthdz=data[3]
#extrapolate using linear interpolation to 0
zn = intp.UnivariateSpline(th[-2:][::-1],z[-2:][::-1],k=1)(0.)
mz2dthdzn = intp.UnivariateSpline(z[-2:],mz2dthdz[-2:],k=1)(zn)
print "zn = ", zn
print "(-z^2 dth/dz)zn = ",mz2dthdzn
#constants
Msun = 1.989e33 #g
Rsun = 6.9599e10 #cm
G = 6.67259e-8 #cm^3 g^-1 s^-2
M = 100. * Msun
R = 5.51729860191 * Rsun
#print important values
pc = (M/(4.*np.pi*mz2dthdzn))*(zn/R)**3.
print "pc = ",pc
A = zn/R
print "A = ",A
K=((4.*np.pi*G)/(n*A**2.))*pc**(1.-1./n)
print "K = ",K
Pc=K*pc**(1.+1./n)
print "Pc = ",Pc
#replace . with p in plot filenames so LaTeX doesn't complain
sn = list(str(n))
sn[1]='p'
sn="".join(sn)
#set up dimensional variables and plot
r = z/A
p = pc*th**n
P = K*p**(1.+1./n)
m = 4.*np.pi*p*r**2.
dr = r[1000]-r[999]
menc = np.cumsum(m*dr)
plt.plot(r/R,menc/Msun)
plt.xlabel(r"r/R")
plt.ylabel(r"$m$ [M$_\odot$]")
plt.savefig('mass'+sn+'.pdf')
plt.clf()
plt.plot(menc/Msun,p)
plt.xlabel(r"$m$ [M$_\odot$]")
plt.ylabel(r"$\rho$ [g cm$^{-3}$]")
#plt.hlines(pc*m3ozdthdz[-1],0,M)
plt.savefig('density'+sn+'.pdf')
plt.clf()
plt.plot(menc/Msun,P)
plt.xlabel(r"$m$ [M$_\odot$]")
plt.ylabel(r"$P$ [dyne cm$^{-2}$]")
plt.savefig('pressure'+sn+'.pdf')
plt.clf()
#constants
a = 7.56591e-15
kb = 1.38e-16
mh = 1.674e-24
mu = 1./(2.*0.76+(3./4.)*0.26)
print mu
#def eos(T):
# m=p*kb*T/(mu*mh)+(a/3.)*T**4-P
# return m
#
#T0 = np.logspace(np.log10(1.9e8),np.log10(1.1e5),np.size(p))
#print np.shape(T0)
#res = minimize(eos,T0)
B=0.582814
T=((1.-B)*P*3./a)**0.25
plt.plot(menc/Msun,T)
plt.xlabel(r"$m$ [M$_\odot$]")
plt.ylabel(r"$T$ [K]")
plt.savefig("temperature"+sn+'.pdf')
plt.clf()
B=0.5
#temperature from eq 19.22
T=(((1.-B)*3.*P)/a)**0.25
plt.plot(menc/Msun,np.log10(T))
plt.xlabel(r"$m$ [M$_\odot$]")
plt.ylabel(r"$T$ [K]")
plt.savefig('temp'+sn+'.pdf')
plt.clf()
U = -G*4.*np.pi* np.trapz((menc*p*r),r)
print "integrated Ugrav = ", U
print "Eg (19.44) = ",(-3./(5.-n))*(G*M*M)/R
print "Eg ave density = ",-G*(16./15.)*np.pi**2*(pc*m3ozdthdz[-1])**2*R**5
|
mit
|
badlands-model/BayesLands
|
pyBadlands/simulation/buildFlux.py
|
1
|
13637
|
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This file is the main entry point for computing the flow network and the associated sedimentary fluxes.
"""
import sys
import time
import numpy as np
import mpi4py.MPI as mpi
from matplotlib import path
from pyBadlands import (elevationTIN)
def streamflow(input, FVmesh, recGrid, force, hillslope, flow, elevation, \
lGIDs, rain, tNow, verbose=False):
"""
Compute flow network.
"""
rank = mpi.COMM_WORLD.rank
size = mpi.COMM_WORLD.size
comm = mpi.COMM_WORLD
# Update sea-level
walltime = time.clock()
force.getSea(tNow)
fillH = None
# Update river input
force.getRivers(tNow)
riverrain = rain+force.rivQw
# Build an initial depression-less surface at start time if required
if input.tStart == tNow and input.nopit == 1 :
fillH = elevationTIN.pit_stack_PD(elevation,input.nopit,force.sealevel)
elevation = fillH
else:
fillH = elevationTIN.pit_stack_PD(elevation,0,force.sealevel)
if rank == 0 and verbose and input.spl:
print " - depression-less algorithm PD with stack", time.clock() - walltime
# Compute stream network
walltime = time.clock()
flow.SFD_receivers(fillH, elevation, FVmesh.neighbours,
FVmesh.vor_edges, FVmesh.edge_length,
lGIDs)
if rank == 0 and verbose:
print " - compute receivers parallel ", time.clock() - walltime
    # Distribute local minima evenly to processors on the filled surface
walltime = time.clock()
flow.localbase = np.array_split(flow.base, size)[rank]
flow.ordered_node_array_filled()
if rank == 0 and verbose:
print " - compute stack order locally for filled surface", time.clock() - walltime
walltime = time.clock()
stackNbs = comm.allgather(len(flow.localstack))
globalstack = np.zeros(sum(stackNbs), dtype=flow.localstack.dtype)
comm.Allgatherv(sendbuf=[flow.localstack, mpi.INT],
recvbuf=[globalstack, (stackNbs, None), mpi.INT])
flow.stack = globalstack
if rank == 0 and verbose:
print " - send stack order for filled surface globally ", time.clock() - walltime
    # Distribute local minima evenly on the real surface
walltime = time.clock()
flow.localbase1 = np.array_split(flow.base1, size)[rank]
flow.ordered_node_array_elev()
if rank == 0 and verbose:
print " - compute stack order locally for real surface", time.clock() - walltime
walltime = time.clock()
stackNbs1 = comm.allgather(len(flow.localstack1))
globalstack1 = np.zeros(sum(stackNbs1), dtype=flow.localstack1.dtype)
comm.Allgatherv(sendbuf=[flow.localstack1, mpi.INT],
recvbuf=[globalstack1, (stackNbs1, None), mpi.INT])
flow.stack1 = globalstack1
if rank == 0 and verbose:
print " - send stack order for real surface globally ", time.clock() - walltime
    # Compute a unique ID for each local depression and its downstream draining nodes
flow.compute_parameters_depression(fillH,elevation,FVmesh.control_volumes,force.sealevel)
# Compute discharge
walltime = time.clock()
flow.compute_flow(elevation, FVmesh.control_volumes, riverrain)
if rank == 0 and verbose:
print " - compute discharge ", time.clock() - walltime
return fillH, elevation
def sediment_flux(input, recGrid, hillslope, FVmesh, tMesh, flow, force, rain, lGIDs, applyDisp, straTIN, \
mapero, cumdiff, cumhill, fillH, disp, inGIDs, elevation, tNow, tEnd, verbose=False):
"""
Compute sediment fluxes.
"""
rank = mpi.COMM_WORLD.rank
size = mpi.COMM_WORLD.size
comm = mpi.COMM_WORLD
flow_time = time.clock()
#verbose = True
# Get active layer
if straTIN is not None:
walltime = time.clock()
flow.activelay[flow.activelay<1.] = 1.
flow.activelay[flow.activelay>straTIN.activeh] = straTIN.activeh
straTIN.get_active_layer(flow.activelay,verbose)
activelay = straTIN.alayR
flow.straTIN = 1
# Set the average erodibility based on rock types in the active layer
flow.erodibility = np.sum(straTIN.rockCk*activelay/flow.activelay.reshape(len(elevation),1),axis=1)
eroCk = straTIN.rockCk
if rank == 0 and verbose:
print " - Get active layer ", time.clock() - walltime
else:
activelay = None
eroCk = 0.
# Find border/inside nodes
if flow.domain is None:
ids = np.arange(len(FVmesh.control_volumes))
tmp1 = np.where(FVmesh.control_volumes>0.)[0]
xyMin = [recGrid.regX.min()-1., recGrid.regY.min()-1.]
xyMax = [recGrid.regX.max()+1., recGrid.regY.max()+1.]
flow.domain = path.Path([(xyMin[0],xyMin[1]),(xyMax[0],xyMin[1]), (xyMax[0],xyMax[1]), (xyMin[0],xyMax[1])])
tmp2 = flow.domain.contains_points(flow.xycoords)
flow.insideIDs = np.intersect1d(tmp1,ids[tmp2])
flow.borders = np.zeros(len(FVmesh.control_volumes),dtype=int)
flow.borders[flow.insideIDs] = 1
flow.outsideIDs = np.where(flow.borders==0)[0]
xyMin2 = [recGrid.regX.min()+recGrid.resEdges, recGrid.regY.min()+recGrid.resEdges]
xyMax2 = [recGrid.regX.max()-recGrid.resEdges, recGrid.regY.max()-recGrid.resEdges]
xyMin2 = [recGrid.regX.min()+1, recGrid.regY.min()+1]
xyMax2 = [recGrid.regX.max()-1, recGrid.regY.max()-1]
domain = path.Path([(xyMin2[0],xyMin2[1]),(xyMax2[0],xyMin2[1]), (xyMax2[0],xyMax2[1]), (xyMin2[0],xyMax2[1])])
tmp3 = domain.contains_points(flow.xycoords)
flow.insideIDs2 = ids[tmp3]
flow.borders2 = np.zeros(len(FVmesh.control_volumes),dtype=int)
flow.borders2[flow.insideIDs2] = 1
flow.outsideIDs2 = np.where(flow.borders2==0)[0]
# Compute CFL condition
walltime = time.clock()
if input.Hillslope and hillslope.updatedt == 0:
if hillslope.Sc == 0:
hillslope.dt_stability(FVmesh.edge_length[inGIDs,:tMesh.maxNgbh])
else:
hillslope.dt_stabilityCs(elevation, FVmesh.neighbours, FVmesh.edge_length,
lGIDs, flow.borders2)
if hillslope.CFL < input.minDT:
print 'Decrease your hillslope diffusion coefficients to ensure stability.'
sys.exit(0)
hillslope.dt_stability_ms(FVmesh.edge_length[inGIDs,:tMesh.maxNgbh])
elif hillslope.CFL is None:
hillslope.CFL = tEnd-tNow
flow.dt_stability(fillH, inGIDs)
CFLtime = min(flow.CFL, hillslope.CFL)
if CFLtime>1.:
CFLtime = float(round(CFLtime-0.5,0))
if rank == 0 and verbose:
print 'CFL for hillslope and flow ',hillslope.CFL,flow.CFL,CFLtime
CFLtime = min(CFLtime, tEnd - tNow)
CFLtime = max(input.minDT, CFLtime)
CFLtime = min(input.maxDT, CFLtime)
if rank == 0 and verbose:
print " - Get CFL time step ", time.clock() - walltime
# Compute sediment fluxes
if input.erolays >= 0:
oldelev = np.copy(elevation)
# Initial cumulative elevation change
walltime = time.clock()
timestep, sedchange, erosion, deposition = flow.compute_sedflux(FVmesh.control_volumes, elevation, rain, fillH,
CFLtime, activelay, eroCk, force.rivQs, force.sealevel, input.perc_dep,
input.slp_cr, FVmesh.neighbours, verbose=False)
if rank == 0 and verbose:
print " - Get stream fluxes ", time.clock() - walltime
ed = np.sum(sedchange,axis=1)
elevation += ed
cumdiff += ed
# Compute marine sediment diffusion
if hillslope.CDriver > 0.:
walltime = time.clock()
# Initialise marine sediments diffusion array
it = 0
sumdep = np.sum(deposition,axis=1)
maxth = 0.1
diffstep = timestep
diffcoeff = hillslope.sedfluxmarine(force.sealevel, elevation, FVmesh.control_volumes)
# Perform river related sediment diffusion
while diffstep > 0. and it < 1000:
# Define maximum time step
maxstep = min(hillslope.CFLms,diffstep)
# Compute maximum marine fluxes and maximum timestep to avoid excessive diffusion erosion
diffmarine, mindt = flow.compute_marine_diffusion(elevation, sumdep, FVmesh.neighbours, FVmesh.vor_edges,
FVmesh.edge_length, diffcoeff, lGIDs, force.sealevel, maxth, maxstep)
diffmarine[flow.outsideIDs] = 0.
maxstep = min(mindt,maxstep)
# if maxstep < input.minDT:
# print 'WARNING: marine diffusion time step is smaller than minimum timestep:',maxstep
# print 'You will need to decrease your diffusion coefficient for criver'
# stop
# Update diffusion time step and total diffused thicknesses
diffstep -= maxstep
# Distribute rock based on their respective proportions in the deposited columns
if straTIN is not None:
# Compute multi-rock diffusion
sedpropflux, difftot = flow.compute_sediment_marine(elevation, deposition, sumdep,
diffcoeff*maxstep, FVmesh.neighbours, force.sealevel,
maxth, FVmesh.vor_edges, FVmesh.edge_length, lGIDs)
difftot[flow.outsideIDs] = 0.
sedpropflux[flow.outsideIDs,:] = 0.
# Update deposition for each rock type
deposition += sedpropflux
deposition[deposition<0] = 0.
# Update elevation, erosion/deposition
sumdep += difftot
elevation += difftot
cumdiff += difftot
else:
# Update elevation, erosion/deposition
sumdep += diffmarine*maxstep
elevation += diffmarine*maxstep
cumdiff += diffmarine*maxstep
it += 1
if rank == 0 and verbose:
print " - Get river sediment marine fluxes ", time.clock() - walltime
# Compute hillslope processes
dtype = 1
if straTIN is None:
dtype = 0
walltime = time.clock()
area = np.copy(FVmesh.control_volumes)
area[flow.outsideIDs2] = 0.
diffcoeff = hillslope.sedflux(force.sealevel, elevation, FVmesh.control_volumes)
diffcoeff[flow.outsideIDs2] = 0.
diff_flux = flow.compute_hillslope_diffusion(elevation, FVmesh.neighbours, FVmesh.vor_edges,
FVmesh.edge_length, lGIDs, dtype, hillslope.Sc)
diff_flux[flow.outsideIDs2] = 0.
cdiff = diffcoeff*diff_flux*timestep
if straTIN is None:
if input.btype == 'outlet':
cdiff[flow.insideIDs[0]] = 0.
# Update dataset
elevation[flow.insideIDs] += cdiff[flow.insideIDs]
cumdiff[flow.insideIDs] += cdiff[flow.insideIDs]
cumhill[flow.insideIDs] += cdiff[flow.insideIDs]
else:
straTIN.update_layers(erosion, deposition, elevation, verbose)
# Get the active layer thickness to erode using diffusion
maxlayh = -cdiff
maxlayh[maxlayh<1.] = 1.
straTIN.get_active_layer(maxlayh)
# Compute multi-rock diffusion
tdiff, erosion, deposition = flow.compute_sediment_hillslope(elevation, straTIN.alayR,
diffcoeff*timestep, FVmesh.neighbours, FVmesh.vor_edges,
maxlayh, FVmesh.edge_length, lGIDs)
if input.btype == 'outlet':
tdiff[flow.insideIDs[0],:] = 0.
        # Update dataset
elevation += tdiff
cumdiff += tdiff
cumhill += tdiff
# Update active layer
straTIN.update_layers(erosion, deposition, elevation, verbose)
if input.btype == 'slope':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]-0.1
elif input.btype == 'flat':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]
elif input.btype == 'wall':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]+100.
elif input.btype == 'outlet':
elevation[1:len(flow.parentIDs)] = elevation[flow.parentIDs[1:]]+100.
elif input.btype == 'wall1':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]-0.1
elevation[:recGrid.nx+1] = elevation[flow.parentIDs[:recGrid.nx+1]]+100.
if rank == 0 and verbose:
print " - Get hillslope fluxes ", time.clock() - walltime
# Update erodibility values
if input.erolays >= 0:
mapero.getErodibility(elevation-oldelev)
flow.erodibility = mapero.erodibility
if applyDisp:
elevation += disp * timestep
tNow += timestep
if rank == 0 and verbose:
print " - Flow computation ", time.clock() - flow_time
return tNow,elevation,cumdiff,cumhill
|
gpl-3.0
|
prheenan/BioModel
|
BellZhurkov/Python/Code/Bell_Helper.py
|
1
|
1033
|
# force floating point division. Can still use integer division with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from FitUtils.Python import FitClasses
class BellParamValues(FitClasses.ParamValues):
"""
Class to record parameter values given to a fit or gotten from the same
"""
def __init__(self,**kwargs):
"""
Args:
**kwargs: see FitClasses.ParamValues
"""
super(BellParamValues,self).__init__(**kwargs)
def InitParams(self):
"""
        Initialize parameters.
"""
Params = ["beta",
"k0",
"DeltaG",
"DeltaX",]
return Params
def Scale(self,x,y):
"""
Scales the variables to x and y (Force and rate) limits
"""
return dict(k0=1,
DeltaX=1,
DeltaG=1,
beta=1)
|
gpl-2.0
|
arbalet-project/arbasdk
|
arbalet/colors.py
|
1
|
14076
|
"""
Arbalet - ARduino-BAsed LEd Table
Operations on colors
Copyright 2015 Yoan Mollard - Arbalet project - http://github.com/arbalet-project
License: GPL version 3 http://www.gnu.org/licenses/gpl.html
"""
from numpy import isscalar, array
# From matplotlib.colors
cnames = {
'aliceblue': array((0.9411764705882353, 0.9725490196078431, 1.0)),
'antiquewhite': array((0.9803921568627451, 0.9215686274509803, 0.8431372549019608)),
'aqua': array((0.0, 1.0, 1.0)),
'aquamarine': array((0.4980392156862745, 1.0, 0.8313725490196079)),
'azure': array((0.9411764705882353, 1.0, 1.0)),
'beige': array((0.9607843137254902, 0.9607843137254902, 0.8627450980392157)),
'bisque': array((1.0, 0.8941176470588236, 0.7686274509803922)),
'black': array((0.0, 0.0, 0.0)),
'blanchedalmond': array((1.0, 0.9215686274509803, 0.803921568627451)),
'blue': array((0.0, 0.0, 1.0)),
'blueviolet': array((0.5411764705882353, 0.16862745098039217, 0.8862745098039215)),
'brown': array((0.6470588235294118, 0.16470588235294117, 0.16470588235294117)),
'burlywood': array((0.8705882352941177, 0.7215686274509804, 0.5294117647058824)),
'cadetblue': array((0.37254901960784315, 0.6196078431372549, 0.6274509803921569)),
'chartreuse': array((0.4980392156862745, 1.0, 0.0)),
'chocolate': array((0.8235294117647058, 0.4117647058823529, 0.11764705882352941)),
'coral': array((1.0, 0.4980392156862745, 0.3137254901960784)),
'cornflowerblue': array((0.39215686274509803, 0.5843137254901961, 0.9294117647058824)),
'cornsilk': array((1.0, 0.9725490196078431, 0.8627450980392157)),
'crimson': array((0.8627450980392157, 0.0784313725490196, 0.23529411764705882)),
'cyan': array((0.0, 1.0, 1.0)),
'darkblue': array((0.0, 0.0, 0.5450980392156862)),
'darkcyan': array((0.0, 0.5450980392156862, 0.5450980392156862)),
'darkgoldenrod': array((0.7215686274509804, 0.5254901960784314, 0.043137254901960784)),
'darkgray': array((0.6627450980392157, 0.6627450980392157, 0.6627450980392157)),
'darkgreen': array((0.0, 0.39215686274509803, 0.0)),
'darkgrey': array((0.6627450980392157, 0.6627450980392157, 0.6627450980392157)),
'darkkhaki': array((0.7411764705882353, 0.7176470588235294, 0.4196078431372549)),
'darkmagenta': array((0.5450980392156862, 0.0, 0.5450980392156862)),
'darkolivegreen': array((0.3333333333333333, 0.4196078431372549, 0.1843137254901961)),
'darkorange': array((1.0, 0.5490196078431373, 0.0)),
'darkorchid': array((0.6, 0.19607843137254902, 0.8)),
'darkred': array((0.5450980392156862, 0.0, 0.0)),
'darksage': array((0.34901960784313724, 0.5215686274509804, 0.33725490196078434)),
'darksalmon': array((0.9137254901960784, 0.5882352941176471, 0.47843137254901963)),
'darkseagreen': array((0.5607843137254902, 0.7372549019607844, 0.5607843137254902)),
'darkslateblue': array((0.2823529411764706, 0.23921568627450981, 0.5450980392156862)),
'darkslategray': array((0.1843137254901961, 0.30980392156862746, 0.30980392156862746)),
'darkslategrey': array((0.1843137254901961, 0.30980392156862746, 0.30980392156862746)),
'darkturquoise': array((0.0, 0.807843137254902, 0.8196078431372549)),
'darkviolet': array((0.5803921568627451, 0.0, 0.8274509803921568)),
'deeppink': array((1.0, 0.0784313725490196, 0.5764705882352941)),
'deepskyblue': array((0.0, 0.7490196078431373, 1.0)),
'dimgray': array((0.4117647058823529, 0.4117647058823529, 0.4117647058823529)),
'dimgrey': array((0.4117647058823529, 0.4117647058823529, 0.4117647058823529)),
'dodgerblue': array((0.11764705882352941, 0.5647058823529412, 1.0)),
'firebrick': array((0.6980392156862745, 0.13333333333333333, 0.13333333333333333)),
'floralwhite': array((1.0, 0.9803921568627451, 0.9411764705882353)),
'forestgreen': array((0.13333333333333333, 0.5450980392156862, 0.13333333333333333)),
'fuchsia': array((1.0, 0.0, 1.0)),
'gainsboro': array((0.8627450980392157, 0.8627450980392157, 0.8627450980392157)),
'ghostwhite': array((0.9725490196078431, 0.9725490196078431, 1.0)),
'gold': array((1.0, 0.8431372549019608, 0.0)),
'goldenrod': array((0.8549019607843137, 0.6470588235294118, 0.12549019607843137)),
'gray': array((0.5019607843137255, 0.5019607843137255, 0.5019607843137255)),
'green': array((0.0, 0.5019607843137255, 0.0)),
'greenyellow': array((0.6784313725490196, 1.0, 0.1843137254901961)),
'grey': array((0.5019607843137255, 0.5019607843137255, 0.5019607843137255)),
'honeydew': array((0.9411764705882353, 1.0, 0.9411764705882353)),
'hotpink': array((1.0, 0.4117647058823529, 0.7058823529411765)),
'indianred': array((0.803921568627451, 0.3607843137254902, 0.3607843137254902)),
'indigo': array((0.29411764705882354, 0.0, 0.5098039215686274)),
'ivory': array((1.0, 1.0, 0.9411764705882353)),
'khaki': array((0.9411764705882353, 0.9019607843137255, 0.5490196078431373)),
'lavender': array((0.9019607843137255, 0.9019607843137255, 0.9803921568627451)),
'lavenderblush': array((1.0, 0.9411764705882353, 0.9607843137254902)),
'lawngreen': array((0.48627450980392156, 0.9882352941176471, 0.0)),
'lemonchiffon': array((1.0, 0.9803921568627451, 0.803921568627451)),
'lightblue': array((0.6784313725490196, 0.8470588235294118, 0.9019607843137255)),
'lightcoral': array((0.9411764705882353, 0.5019607843137255, 0.5019607843137255)),
'lightcyan': array((0.8784313725490196, 1.0, 1.0)),
'lightgoldenrodyellow': array((0.9803921568627451, 0.9803921568627451, 0.8235294117647058)),
'lightgray': array((0.8274509803921568, 0.8274509803921568, 0.8274509803921568)),
'lightgreen': array((0.5647058823529412, 0.9333333333333333, 0.5647058823529412)),
'lightgrey': array((0.8274509803921568, 0.8274509803921568, 0.8274509803921568)),
'lightpink': array((1.0, 0.7137254901960784, 0.7568627450980392)),
'lightsage': array((0.7372549019607844, 0.9254901960784314, 0.6745098039215687)),
'lightsalmon': array((1.0, 0.6274509803921569, 0.47843137254901963)),
'lightseagreen': array((0.12549019607843137, 0.6980392156862745, 0.6666666666666666)),
'lightskyblue': array((0.5294117647058824, 0.807843137254902, 0.9803921568627451)),
'lightslategray': array((0.4666666666666667, 0.5333333333333333, 0.6)),
'lightslategrey': array((0.4666666666666667, 0.5333333333333333, 0.6)),
'lightsteelblue': array((0.6901960784313725, 0.7686274509803922, 0.8705882352941177)),
'lightyellow': array((1.0, 1.0, 0.8784313725490196)),
'lime': array((0.0, 1.0, 0.0)),
'limegreen': array((0.19607843137254902, 0.803921568627451, 0.19607843137254902)),
'linen': array((0.9803921568627451, 0.9411764705882353, 0.9019607843137255)),
'magenta': array((1.0, 0.0, 1.0)),
'maroon': array((0.5019607843137255, 0.0, 0.0)),
'mediumaquamarine': array((0.4, 0.803921568627451, 0.6666666666666666)),
'mediumblue': array((0.0, 0.0, 0.803921568627451)),
'mediumorchid': array((0.7294117647058823, 0.3333333333333333, 0.8274509803921568)),
'mediumpurple': array((0.5764705882352941, 0.4392156862745098, 0.8588235294117647)),
'mediumseagreen': array((0.23529411764705882, 0.7019607843137254, 0.44313725490196076)),
'mediumslateblue': array((0.4823529411764706, 0.40784313725490196, 0.9333333333333333)),
'mediumspringgreen': array((0.0, 0.9803921568627451, 0.6039215686274509)),
'mediumturquoise': array((0.2823529411764706, 0.8196078431372549, 0.8)),
'mediumvioletred': array((0.7803921568627451, 0.08235294117647059, 0.5215686274509804)),
'midnightblue': array((0.09803921568627451, 0.09803921568627451, 0.4392156862745098)),
'mintcream': array((0.9607843137254902, 1.0, 0.9803921568627451)),
'mistyrose': array((1.0, 0.8941176470588236, 0.8823529411764706)),
'moccasin': array((1.0, 0.8941176470588236, 0.7098039215686275)),
'navajowhite': array((1.0, 0.8705882352941177, 0.6784313725490196)),
'navy': array((0.0, 0.0, 0.5019607843137255)),
'oldlace': array((0.9921568627450981, 0.9607843137254902, 0.9019607843137255)),
'olive': array((0.5019607843137255, 0.5019607843137255, 0.0)),
'olivedrab': array((0.4196078431372549, 0.5568627450980392, 0.13725490196078433)),
'orange': array((1.0, 0.6470588235294118, 0.0)),
'orangered': array((1.0, 0.27058823529411763, 0.0)),
'orchid': array((0.8549019607843137, 0.4392156862745098, 0.8392156862745098)),
'palegoldenrod': array((0.9333333333333333, 0.9098039215686274, 0.6666666666666666)),
'palegreen': array((0.596078431372549, 0.984313725490196, 0.596078431372549)),
'paleturquoise': array((0.6862745098039216, 0.9333333333333333, 0.9333333333333333)),
'palevioletred': array((0.8588235294117647, 0.4392156862745098, 0.5764705882352941)),
'papayawhip': array((1.0, 0.9372549019607843, 0.8352941176470589)),
'peachpuff': array((1.0, 0.8549019607843137, 0.7254901960784313)),
'peru': array((0.803921568627451, 0.5215686274509804, 0.24705882352941178)),
'pink': array((1.0, 0.7529411764705882, 0.796078431372549)),
'plum': array((0.8666666666666667, 0.6274509803921569, 0.8666666666666667)),
'powderblue': array((0.6901960784313725, 0.8784313725490196, 0.9019607843137255)),
'purple': array((0.5019607843137255, 0.0, 0.5019607843137255)),
'red': array((1.0, 0.0, 0.0)),
'rosybrown': array((0.7372549019607844, 0.5607843137254902, 0.5607843137254902)),
'royalblue': array((0.2549019607843137, 0.4117647058823529, 0.8823529411764706)),
'saddlebrown': array((0.5450980392156862, 0.27058823529411763, 0.07450980392156863)),
'sage': array((0.5294117647058824, 0.6823529411764706, 0.45098039215686275)),
'salmon': array((0.9803921568627451, 0.5019607843137255, 0.4470588235294118)),
'sandybrown': array((0.9803921568627451, 0.6431372549019608, 0.3764705882352941)),
'seagreen': array((0.1803921568627451, 0.5450980392156862, 0.3411764705882353)),
'seashell': array((1.0, 0.9607843137254902, 0.9333333333333333)),
'sienna': array((0.6274509803921569, 0.3215686274509804, 0.17647058823529413)),
'silver': array((0.7529411764705882, 0.7529411764705882, 0.7529411764705882)),
'skyblue': array((0.5294117647058824, 0.807843137254902, 0.9215686274509803)),
'slateblue': array((0.41568627450980394, 0.35294117647058826, 0.803921568627451)),
'slategray': array((0.4392156862745098, 0.5019607843137255, 0.5647058823529412)),
'slategrey': array((0.4392156862745098, 0.5019607843137255, 0.5647058823529412)),
'snow': array((1.0, 0.9803921568627451, 0.9803921568627451)),
'springgreen': array((0.0, 1.0, 0.4980392156862745)),
'steelblue': array((0.27450980392156865, 0.5098039215686274, 0.7058823529411765)),
'tan': array((0.8235294117647058, 0.7058823529411765, 0.5490196078431373)),
'teal': array((0.0, 0.5019607843137255, 0.5019607843137255)),
'thistle': array((0.8470588235294118, 0.7490196078431373, 0.8470588235294118)),
'tomato': array((1.0, 0.38823529411764707, 0.2784313725490196)),
'turquoise': array((0.25098039215686274, 0.8784313725490196, 0.8156862745098039)),
'violet': array((0.9333333333333333, 0.5098039215686274, 0.9333333333333333)),
'wheat': array((0.9607843137254902, 0.8705882352941177, 0.7019607843137254)),
'white': array((1.0, 1.0, 1.0)),
'whitesmoke': array((0.9607843137254902, 0.9607843137254902, 0.9607843137254902)),
'yellow': array((1.0, 1.0, 0.0)),
'yellowgreen': array((0.6039215686274509, 0.803921568627451, 0.19607843137254902))
}
def name_to_rgb(color_name):
global cnames
return cnames[color_name]
def name_to_hsv(color_name):
return rgb_to_hsv(name_to_rgb(color_name))
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g=None, b=None):
if g is None and b is None:
r, g, b = r
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s=None, v=None):
if s is None and v is None:
h, s, v = h
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
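# Editor's illustrative round-trip (sketch, not part of the original module):
#     rgb = name_to_rgb('gold')      # array((1.0, 0.843..., 0.0))
#     h, s, v = rgb_to_hsv(rgb)
#     hsv_to_rgb(h, s, v)            # ~ (1.0, 0.843..., 0.0)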
def __to_array(pixel):
if isinstance(pixel, str):
pixel = name_to_rgb(pixel)
return array(pixel)
def add(pixel_1, pixel_2):
"""
Addition of two RGB or named pixels
    :param pixel_1: Color name or tuple, list, array
:param pixel_2: Color name or tuple, list, array
:return: array(r, g, b)
"""
pixel_1 = __to_array(pixel_1)
pixel_2 = __to_array(pixel_2)
return pixel_1 + pixel_2
def mul(pixel, scalar):
"""
Multiplication of a RGB or named pixel with a scalar
:param pixel: Color name or tuple, list, array
:param scalar: int or float to multiply the array with
:return: array(r, g, b)
"""
pixel = __to_array(pixel)
if not isscalar(scalar):
raise TypeError("Expected a scalar for pixel multiplication, got {}".format(type(scalar)))
return pixel*scalar
def equal(pixel_1, pixel_2):
"""
Return true if these two colors are strictly equal
:param pixel_1: Color name or tuple, list, array
:param pixel_2: Color name or tuple, list, array
:return: True if they are strictly equal, False otherwise
"""
pixel_1 = __to_array(pixel_1)
pixel_2 = __to_array(pixel_2)
return (pixel_1 == pixel_2).all(0)
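# Editor's illustrative sketch (not part of the original module):
#     equal(mul('red', 0.5), (0.5, 0.0, 0.0))   # -> True
#     add('navy', (0.5, 0.5, 0.0))              # -> array((0.5, 0.5, 0.50196...))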
|
gpl-3.0
|
mhdella/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
`partial_fit`. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
cheral/orange3
|
Orange/projection/manifold.py
|
2
|
5160
|
import numpy as np
import sklearn.manifold as skl_manifold
from Orange.distance import (SklDistance, SpearmanDistance, PearsonDistance,
Euclidean)
from Orange.projection import SklProjector
__all__ = ["MDS", "Isomap", "LocallyLinearEmbedding", "SpectralEmbedding",
"TSNE"]
def torgerson(distances, n_components=2):
"""
    Perform classical MDS (Torgerson scaling).
    .. note::
If the distances are euclidean then this is equivalent to projecting
the original data points to the first `n` principal components.
"""
distances = np.asarray(distances)
assert distances.shape[0] == distances.shape[1]
N = distances.shape[0]
    # squared distances
D_sq = distances ** 2
# double center the D_sq
rsum = np.sum(D_sq, axis=1, keepdims=True)
csum = np.sum(D_sq, axis=0, keepdims=True)
total = np.sum(csum)
D_sq -= rsum / N
D_sq -= csum / N
D_sq += total / (N ** 2)
B = np.multiply(D_sq, -0.5, out=D_sq)
U, L, _ = np.linalg.svd(B)
if n_components > N:
U = np.hstack((U, np.zeros((N, n_components - N))))
L = np.hstack((L, np.zeros((n_components - N))))
U = U[:, :n_components]
L = L[:n_components]
D = np.diag(np.sqrt(L))
return np.dot(U, D)
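# Editor's illustrative sketch (assumed toy data, not from the original module):
# three mutually equidistant points should embed as an equilateral triangle.
#     D = np.array([[0., 1., 1.],
#                   [1., 0., 1.],
#                   [1., 1., 0.]])
#     coords = torgerson(D, n_components=2)   # shape (3, 2)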
class MDS(SklProjector):
__wraps__ = skl_manifold.MDS
name = 'MDS'
def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300,
eps=0.001, n_jobs=1, random_state=None,
dissimilarity='euclidean', init_type="random", init_data=None,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
self._metric = dissimilarity
self.init_type = init_type
self.init_data = init_data
def __call__(self, data):
distances = SklDistance, SpearmanDistance, PearsonDistance
if isinstance(self._metric, distances):
data = self.preprocess(data)
_X, Y, domain = data.X, data.Y, data.domain
X = dist_matrix = self._metric(_X)
self.params['dissimilarity'] = 'precomputed'
        elif self._metric == 'precomputed':
dist_matrix, Y, domain = data, None, None
X = dist_matrix
else:
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
if self.init_type == "PCA":
dist_matrix = Euclidean(X)
if self.init_type == "PCA" and self.init_data is None:
self.init_data = torgerson(dist_matrix, self.params['n_components'])
clf = self.fit(X, Y=Y)
clf.domain = domain
return clf
def fit(self, X, Y=None):
proj = self.__wraps__(**self.params)
return proj.fit(X, init=self.init_data, y=Y)
class Isomap(SklProjector):
__wraps__ = skl_manifold.Isomap
name = 'Isomap'
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class LocallyLinearEmbedding(SklProjector):
__wraps__ = skl_manifold.LocallyLinearEmbedding
name = 'Locally Linear Embedding'
def __init__(self, n_neighbors=5, n_components=2, reg=0.001,
eigen_solver='auto', tol=1e-06, max_iter=100,
method='standard', hessian_tol=0.0001,
modified_tol=1e-12, neighbors_algorithm='auto',
random_state=None, preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class SpectralEmbedding(SklProjector):
__wraps__ = skl_manifold.SpectralEmbedding
name = 'Spectral Embedding'
def __init__(self, n_components=2, affinity='nearest_neighbors', gamma=None,
random_state=None, eigen_solver=None, n_neighbors=None,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class TSNE(SklProjector):
__wraps__ = skl_manifold.TSNE
name = 't-SNE'
def __init__(self, n_components=2, perplexity=30.0, early_exaggeration=4.0,
learning_rate=1000.0, n_iter=1000, n_iter_without_progress=30,
min_grad_norm=1e-07, metric='euclidean', init='random',
random_state=None, method='barnes_hut', angle=0.5,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
def __call__(self, data):
        if self.params['metric'] == 'precomputed':
X, Y, domain = data, None, None
else:
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
distances = SklDistance, SpearmanDistance, PearsonDistance
if isinstance(self.params['metric'], distances):
X = self.params['metric'](X)
self.params['metric'] = 'precomputed'
clf = self.fit(X, Y=Y)
clf.domain = domain
return clf
|
bsd-2-clause
|
zaxtax/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
22
|
1848
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
josephcslater/scipy
|
scipy/signal/spectral.py
|
6
|
66649
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
    The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
    extended by Scargle [2]_ to find, and test the significance of, weak
    periodic signals with uneven temporal sampling.
    When *normalize* is False (default) the computed periodogram is
    unnormalized; it takes the value ``(A**2) * N/4`` for a harmonic
    signal with amplitude A for sufficiently large N.
    When *normalize* is True the computed periodogram is normalized by
    the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
    The algorithm running time scales roughly as O(len(x) * len(freqs)), i.e.
    O(N^2) when the number of samples and frequencies are both large.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade-off between accurately estimating the
signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to Welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
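# For every sample offset within one hop (of width `step`), add up the
# window values that overlap that offset; COLA holds when these sums are
# all (approximately) equal.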
binsums = np.sum((win[ii*step:(ii+1)*step] for ii in range(nperseg//step)),
axis=0)
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE Trans. Acoust.,
Speech, Signal Process., 1984, 10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
`numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE Trans. Acoust.,
Speech, Signal Process., 1984, 10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
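# Inverse-transform each segment along the frequency axis and keep only
# the first nperseg samples of every segment.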
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence, Cxy, of discrete-time
signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade-off between accurately estimating the
signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode: str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving
average filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
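Examples
--------
This helper is normally reached through `welch`, `csd`, `stft` or
`spectrogram`; it returns the per-segment result without averaging.
A rough sketch of how `welch` and `csd` build on it (illustrative
only, default arguments assumed):

>>> x = np.random.randn(1024)
>>> freqs, t, Pxx_per_seg = _spectral_helper(x, x, fs=1.0, mode='psd')
>>> Pxx = Pxx_per_seg.mean(axis=-1)  # the averaging later done by `csd`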
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
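# The STFT returns amplitudes rather than powers, so take the square
# root of the power scale computed above.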
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
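# A one-sided spectrum carries the power of the suppressed negative
# frequencies, so double every bin except DC (and the unpaired Nyquist
# bin when nfft is even).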
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
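# Place each segment's time stamp at the centre of that segment.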
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating
data?", http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
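Examples
--------
A rough sketch of the segmenting step for ``nperseg=4`` and
``noverlap=2`` on ``x = np.arange(10.)`` (shapes only, illustrative)::

    step = 4 - 2
    shape = x.shape[:-1] + ((x.shape[-1] - 2) // step, 4)   # -> (4, 4)
    # i.e. four overlapping segments: x[0:4], x[2:6], x[4:8], x[6:10]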
"""
# Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
window. If the function was called with a string or tuple then this will hold
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
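Examples
--------
A minimal sketch of the triage rules (illustrative only):

>>> win, nperseg = _triage_segments('hann', None, input_length=1000)
>>> nperseg
256
>>> win, nperseg = _triage_segments(np.ones(128), None, input_length=1000)
>>> nperseg
128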
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
|
bsd-3-clause
|
ThomasMiconi/htmresearch
|
projects/capybara/anomaly_detection/plot_results.py
|
9
|
1755
|
__author__ = 'mleborgne'
import matplotlib.pyplot as plt
import csv
import os
from settings import (METRICS,
SENSORS,
PATIENT_IDS,
ANOMALY_LIKELIHOOD_THRESHOLD,
MODEL_RESULTS_DIR,
PLOT_RESULTS_DIR)
for patient in PATIENT_IDS:
for sensor in SENSORS:
fig = plt.figure(figsize=(40, 10))
plot_index = 1
for metric in METRICS:
fileName = "%s/%s_%s_%s_out.csv" % (MODEL_RESULTS_DIR,
metric,
sensor,
patient)
inputFile = open(fileName, "r")
csvReader = csv.reader(inputFile)
next(csvReader)  # skip header row
t = []
metric_values = []
for row in csvReader:
timestep = int(row[0])
t.append(timestep)
metric_values.append(row[1])
anomaly_likelihood = row[4]
if float(anomaly_likelihood) > ANOMALY_LIKELIHOOD_THRESHOLD:
fig.add_subplot(3, 1, plot_index)
plt.axvspan(timestep, timestep, color='red', alpha=0.5)
fig.add_subplot(3, 1, plot_index)
plt.plot(t, metric_values)
plt.title("Sensor: %s | Metric: %s | Patient ID: %s | "
"Anomaly Likelyhood Threshold: %s" %
(sensor, metric, patient, ANOMALY_LIKELIHOOD_THRESHOLD))
plot_index += 1
if not os.path.exists(PLOT_RESULTS_DIR):
os.makedirs(PLOT_RESULTS_DIR)
plt.savefig('%s/%s_%s_%s.png' % (PLOT_RESULTS_DIR,
patient,
sensor,
ANOMALY_LIKELIHOOD_THRESHOLD))
|
agpl-3.0
|
mwv/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
heli522/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
2903
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
hitszxp/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
235
|
3891
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
|
bsd-3-clause
|
HealthCatalyst/healthcareai-py
|
healthcareai/tests/test_trainer.py
|
4
|
7124
|
import os
import sys
import unittest
from contextlib import contextmanager
from io import StringIO
from healthcareai.common.healthcareai_error import HealthcareAIError
from healthcareai.supervised_model_trainer import SupervisedModelTrainer
import healthcareai.tests.helpers as helpers
from healthcareai.trained_models.trained_supervised_model import TrainedSupervisedModel
import healthcareai.datasets as hcai_datasets
class TestSupervisedModelTrainer(unittest.TestCase):
@classmethod
def setUpClass(cls):
df = hcai_datasets.load_diabetes()
# Drop columns that won't help machine learning
columns_to_remove = ['PatientID']
df.drop(columns_to_remove, axis=1, inplace=True)
cls.classification_trainer = SupervisedModelTrainer(dataframe=df,
predicted_column='ThirtyDayReadmitFLG',
model_type='classification',
impute=True,
grain_column='PatientEncounterID',
verbose=False)
cls.regression_trainer = SupervisedModelTrainer(df,
'SystolicBPNBR',
'regression',
grain_column='PatientEncounterID',
impute=True,
verbose=False)
cls.regression_trainer_impute_false = SupervisedModelTrainer(df,
'SystolicBPNBR',
'regression',
grain_column='PatientEncounterID',
impute=False,
verbose=False)
def test_knn(self):
trained_knn = self.classification_trainer.knn()
result = trained_knn.metrics
self.assertIsInstance(trained_knn, TrainedSupervisedModel)
helpers.assertBetween(self, 0.5, 0.95, result['roc_auc'])
helpers.assertBetween(self, 0.79, 0.95, result['accuracy'])
# TODO see if there is a way to make this test work in travisCI. It fails with this error:
# TODO > _tkinter.TclError: no display name and no $DISPLAY environment variable
@unittest.skipIf("SKIP_MSSQL_TESTS" in os.environ and os.environ["SKIP_MSSQL_TESTS"] == "true",
"Skipping this on Travis CI.")
def test_random_forest_classification(self):
# Force plot to save to prevent matplotlib blocking during testing
trained_random_forest = self.classification_trainer.random_forest_classification(save_plot=True)
result = trained_random_forest.metrics
self.assertIsInstance(trained_random_forest, TrainedSupervisedModel)
helpers.assertBetween(self, 0.65, 0.95, result['roc_auc'])
helpers.assertBetween(self, 0.8, 0.95, result['accuracy'])
# Clean up saved plot (see note above)
try:
os.remove('FeatureImportances.png')
except OSError:
pass
def test_linear_regression(self):
trained_linear_model = self.regression_trainer.linear_regression()
self.assertIsInstance(trained_linear_model, TrainedSupervisedModel)
result = trained_linear_model.metrics
helpers.assertBetween(self, 450, 800, result['mean_squared_error'])
helpers.assertBetween(self, 16, 29, result['mean_absolute_error'])
def test_random_forest_regression(self):
trained_rf_regressor = self.regression_trainer.random_forest_regression()
self.assertIsInstance(trained_rf_regressor, TrainedSupervisedModel)
result = trained_rf_regressor.metrics
helpers.assertBetween(self, 350, 750, result['mean_squared_error'])
helpers.assertBetween(self, 10, 25, result['mean_absolute_error'])
def test_logistic_regression(self):
trained_lr = self.classification_trainer.logistic_regression()
self.assertIsInstance(trained_lr, TrainedSupervisedModel)
result = trained_lr.metrics
helpers.assertBetween(self, 0.52, 0.95, result['roc_auc'])
helpers.assertBetween(self, 0.6, 0.95, result['accuracy'])
def test_ensemble_classification(self):
trained_ensemble = self.classification_trainer.ensemble()
self.assertIsInstance(trained_ensemble, TrainedSupervisedModel)
result = trained_ensemble.metrics
helpers.assertBetween(self, 0.6, 0.97, result['roc_auc'])
helpers.assertBetween(self, 0.6, 0.97, result['accuracy'])
def test_ensemble_regression(self):
self.assertRaises(HealthcareAIError, self.regression_trainer.ensemble)
def test_linear_regression_raises_error_on_missing_columns(self):
# TODO how is this working since the model does not use the training df???
training_df = hcai_datasets.load_diabetes()
# Drop columns that won't help machine learning
training_df.drop(['PatientID'], axis=1, inplace=True)
# Train the linear regression model
trained_linear_model = self.regression_trainer.linear_regression()
# Load a new df for predicting
prediction_df = hcai_datasets.load_diabetes()
        # Drop a column that the model expects so the prediction call fails
prediction_df.drop('GenderFLG', axis=1, inplace=True)
# Make some predictions
self.assertRaises(HealthcareAIError, trained_linear_model.make_predictions, prediction_df)
def test_linear_regression_raises_error_on_roc_plot(self):
# Train the linear regression model
trained_linear_model = self.regression_trainer.linear_regression()
# Try the ROC plot
self.assertRaises(HealthcareAIError, trained_linear_model.roc_plot)
def test_impute_false_nan_data(self):
# Train the linear regression model with impute = False
trained_linear_model = self.regression_trainer_impute_false.linear_regression()
# Load a new df for predicting
prediction_df = hcai_datasets.load_diabetes()
        # The number of prediction rows should equal the number of rows in the prediction df
self.assertEqual(len(trained_linear_model.make_predictions(prediction_df)), len(prediction_df))
@contextmanager
def captured_output():
"""
A quick and dirty context manager that captures STDOUT and STDERR to enable testing of functions that print() things
"""
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
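# --- Illustrative usage sketch (not part of the original suite) ---------------
# A minimal sketch, assuming the captured_output() helper above; the function
# name is hypothetical and, lacking a 'test_' prefix, is not collected by the
# test runner.
def _example_captured_output_usage():
    with captured_output() as (out, err):
        print('hello')
    assert out.getvalue().strip() == 'hello'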
if __name__ == '__main__':
unittest.main()
|
mit
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/model/forecast/test_tcmf_forecaster.py
|
1
|
14341
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from zoo.chronos.model.forecast.tcmf_forecaster import TCMFForecaster
from unittest import TestCase
import tempfile
import pandas as pd
class TestChronosModelTCMFForecaster(TestCase):
def setUp(self):
self.model = TCMFForecaster()
self.num_samples = 300
self.horizon = np.random.randint(1, 50)
self.seq_len = 480
self.data = np.random.rand(self.num_samples, self.seq_len)
self.id = np.arange(self.num_samples)
self.data_new = np.random.rand(self.num_samples, self.horizon)
self.fit_params = dict(val_len=12,
start_date="2020-1-1",
freq="5min",
y_iters=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
def test_forecast_tcmf_ndarray(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input, **self.fit_params)
assert not self.model.is_xshards_distributed()
# test predict
yhat = self.model.predict(horizon=self.horizon)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new}) # 1st time
self.model.fit_incremental({'y': self.data_new}) # 2nd time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_tcmf_ndarray_covariates_dti(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input,
covariates=np.random.rand(3, self.seq_len),
dti=pd.date_range('20130101', periods=self.seq_len),
**self.fit_params)
future_covariates = np.random.randn(3, self.horizon)
future_dti = pd.date_range('20130101', periods=self.horizon)
# test predict
yhat = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value,
target_covariates=future_covariates,
target_dti=future_dti,
metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new},
covariates_incr=future_covariates,
dti_incr=future_dti,)
yhat_incr = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_forecast_ndarray_error(self):
# is_xshards_distributed
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
# fit
input = dict({'data': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
input = dict({'y': "abc"})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the value of y should be an ndarray" in str(context.exception))
id_diff = np.arange(200)
input = dict({'id': id_diff, 'y': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the length of the id array should be equal to the number of"
in str(context.exception))
input_right = dict({'id': self.id, 'y': self.data})
self.model.fit(input_right, **self.fit_params)
with self.assertRaises(Exception) as context:
self.model.fit(input_right)
self.assertTrue('This model has already been fully trained' in str(context.exception))
# fit_incremental
data_id_diff = {'id': self.id - 1, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_id_diff)
self.assertTrue('The input ids in fit_incremental differs from input ids in fit'
in str(context.exception))
# evaluate
target_value_fake = dict({"data": self.data_new})
with self.assertRaises(Exception) as context:
self.model.evaluate(target_value=target_value_fake, metric=['mse'])
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
def test_forecast_tcmf_without_id(self):
# construct data
input = dict({'y': self.data})
self.model.fit(input, **self.fit_params)
assert not self.model.is_xshards_distributed()
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
assert "id" not in yhat_loaded
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
target_value = dict({"y": self.data_new})
self.model.evaluate(target_value=target_value, metric=['mse'])
self.model.fit_incremental({'y': self.data_new}) # 1st time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
data_new_id = {'id': self.id, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_new_id)
self.assertTrue('Got valid id in fit_incremental and invalid id in fit.'
in str(context.exception))
def test_forecast_tcmf_xshards(self):
from zoo.orca import OrcaContext
import zoo.orca.data.pandas
import pandas as pd
OrcaContext.pandas_read_backend = "pandas"
def preprocessing(df, id_name, y_name):
id = df.index
data = df.to_numpy()
result = dict({id_name: id, y_name: data})
return result
def postprocessing(pred_results, output_dt_col_name):
id_arr = pred_results["id"]
pred_results = pred_results["prediction"]
pred_results = np.concatenate((np.expand_dims(id_arr, axis=1), pred_results), axis=1)
final_df = pd.DataFrame(pred_results, columns=["id"] + output_dt_col_name)
final_df.id = final_df.id.astype("int")
final_df = final_df.set_index("id")
final_df.columns.name = "datetime"
final_df = final_df.unstack().reset_index().rename({0: "prediction"}, axis=1)
return final_df
def get_pred(d):
return d["prediction"]
with tempfile.NamedTemporaryFile() as temp:
data = np.random.rand(300, 480)
df = pd.DataFrame(data)
df.to_csv(temp.name)
shard = zoo.orca.data.pandas.read_csv(temp.name)
shard.cache()
shard_train = shard.transform_shard(preprocessing, 'id', 'data')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'cid', 'y')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `id` doesn't exist in x" in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'id', 'y')
self.model.fit(shard_train, **self.fit_params)
assert self.model.is_xshards_distributed()
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue('This model has already been fully trained' in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.fit_incremental(shard_train)
self.assertTrue('NotImplementedError' in context.exception.__class__.__name__)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname + "/model")
loaded_model = TCMFForecaster.load(tempdirname + "/model", is_xshards_distributed=True)
horizon = np.random.randint(1, 50)
yhat_shard_origin = self.model.predict(horizon=horizon)
yhat_list_origin = yhat_shard_origin.collect()
yhat_list_origin = list(map(get_pred, yhat_list_origin))
yhat_shard = loaded_model.predict(horizon=horizon)
yhat_list = yhat_shard.collect()
yhat_list = list(map(get_pred, yhat_list))
yhat_origin = np.concatenate(yhat_list_origin)
yhat = np.concatenate(yhat_list)
assert yhat.shape == (300, horizon)
np.testing.assert_equal(yhat, yhat_origin)
output_dt_col_name = pd.date_range(start='2020-05-01', periods=horizon, freq='H').to_list()
yhat_df_shards = yhat_shard.transform_shard(postprocessing, output_dt_col_name)
final_df_list = yhat_df_shards.collect()
final_df = pd.concat(final_df_list)
final_df.sort_values("datetime", inplace=True)
assert final_df.shape == (300 * horizon, 3)
OrcaContext.pandas_read_backend = "spark"
def test_forecast_tcmf_distributed(self):
input = dict({'id': self.id, 'y': self.data})
from zoo.orca import init_orca_context, stop_orca_context
init_orca_context(cores=4, spark_log_level="INFO", init_ray_on_spark=True,
object_store_memory="1g")
self.model.fit(input, num_workers=4, **self.fit_params)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon, num_workers=4)
yhat_loaded = loaded_model.predict(horizon=self.horizon, num_workers=4)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_equal(yhat, yhat_loaded)
self.model.fit_incremental({'y': self.data_new})
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
stop_orca_context()
if __name__ == "__main__":
pytest.main([__file__])
|
apache-2.0
|
Adai0808/BuildingMachineLearningSystemsWithPython
|
ch08/chapter.py
|
21
|
6372
|
import numpy as np # NOT IN BOOK
from matplotlib import pyplot as plt # NOT IN BOOK
def load():
import numpy as np
from scipy import sparse
data = np.loadtxt('data/ml-100k/u.data')
ij = data[:, :2]
ij -= 1 # original data is in 1-based system
values = data[:, 2]
reviews = sparse.csc_matrix((values, ij.T)).astype(float)
return reviews.toarray()
reviews = load()
U,M = np.where(reviews)
import random
test_idxs = np.array(random.sample(range(len(U)), len(U)//10))
train = reviews.copy()
train[U[test_idxs], M[test_idxs]] = 0
test = np.zeros_like(reviews)
test[U[test_idxs], M[test_idxs]] = reviews[U[test_idxs], M[test_idxs]]
class NormalizePositive(object):
def __init__(self, axis=0):
self.axis = axis
def fit(self, features, y=None):
if self.axis == 1:
features = features.T
# count features that are greater than zero in axis 0:
binary = (features > 0)
count0 = binary.sum(axis=0)
# to avoid division by zero, set zero counts to one:
count0[count0 == 0] = 1.
# computing the mean is easy:
self.mean = features.sum(axis=0)/count0
# only consider differences where binary is True:
diff = (features - self.mean) * binary
diff **= 2
# regularize the estimate of std by adding 0.1
self.std = np.sqrt(0.1 + diff.sum(axis=0)/count0)
return self
def transform(self, features):
if self.axis == 1:
features = features.T
binary = (features > 0)
features = features - self.mean
features /= self.std
features *= binary
if self.axis == 1:
features = features.T
return features
def inverse_transform(self, features, copy=True):
if copy:
features = features.copy()
if self.axis == 1:
features = features.T
features *= self.std
features += self.mean
if self.axis == 1:
features = features.T
return features
def fit_transform(self, features):
return self.fit(features).transform(features)
norm = NormalizePositive(axis=1)
binary = (train > 0)
train = norm.fit_transform(train)
# plot just 200x200 area for space reasons
plt.imshow(binary[:200, :200], interpolation='nearest')
from scipy.spatial import distance
# compute all pair-wise distances:
dists = distance.pdist(binary, 'correlation')
# Convert to square form, so that dists[i,j]
# is distance between binary[i] and binary[j]:
dists = distance.squareform(dists)
neighbors = dists.argsort(axis=1)
# We are going to fill this matrix with results
filled = train.copy()
for u in range(filled.shape[0]):
# n_u is neighbors of user
n_u = neighbors[u, 1:]
for m in range(filled.shape[1]):
# get relevant reviews in order!
revs = [train[neigh, m]
for neigh in n_u
                          if binary[neigh, m]]
if len(revs):
# n is the number of reviews for this movie
n = len(revs)
# take half of the reviews plus one into consideration:
n //= 2
n += 1
revs = revs[:n]
filled[u,m] = np.mean(revs)
predicted = norm.inverse_transform(filled)
from sklearn import metrics
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary neighbors): {:.1%}'.format(r2))
reviews = reviews.T
# use same code as before: the distance computation and the neighbour-filling
# loop above must be re-run on the transposed matrix before scoring
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary movie neighbors): {:.1%}'.format(r2))
from sklearn.linear_model import ElasticNetCV # NOT IN BOOK
reg = ElasticNetCV(alphas=[
0.0125, 0.025, 0.05, .125, .25, .5, 1., 2., 4.])
filled = train.copy()
# iterate over all users:
for u in range(train.shape[0]):
curtrain = np.delete(train, u, axis=0)
bu = binary[u]
reg.fit(curtrain[:,bu].T, train[u, bu])
filled[u, ~bu] = reg.predict(curtrain[:,~bu].T)
predicted = norm.inverse_transform(filled)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (user regression): {:.1%}'.format(r2))
# SHOPPING BASKET ANALYSIS
# This is the slow version of the code, which will take a long time to
# complete.
from collections import defaultdict
from itertools import chain
# File is downloaded as a compressed file
import gzip
# file format is a line per transaction
# of the form '12 34 342 5...'
dataset = [[int(tok) for tok in line.strip().split()]
for line in gzip.open('data/retail.dat.gz')]
dataset = [set(d) for d in dataset]
# count how often each product was purchased:
counts = defaultdict(int)
for elem in chain(*dataset):
counts[elem] += 1
minsupport = 80
valid = set(k for k,v in counts.items() if (v >= minsupport))
itemsets = [frozenset([v]) for v in valid]
freqsets = []
for i in range(16):
nextsets = []
tested = set()
for it in itemsets:
for v in valid:
if v not in it:
# Create a new candidate set by adding v to it
c = (it | frozenset([v]))
                # skip if we have already tested this candidate
if c in tested:
continue
tested.add(c)
# Count support by looping over dataset
# This step is slow.
# Check `apriori.py` for a better implementation.
support_c = sum(1 for d in dataset if d.issuperset(c))
if support_c > minsupport:
nextsets.append(c)
freqsets.extend(nextsets)
itemsets = nextsets
if not len(itemsets):
break
print("Finished!")
minlift = 5.0
nr_transactions = float(len(dataset))
for itemset in freqsets:
for item in itemset:
consequent = frozenset([item])
antecedent = itemset-consequent
base = 0.0
# acount: antecedent count
acount = 0.0
# ccount : consequent count
ccount = 0.0
for d in dataset:
if item in d: base += 1
if d.issuperset(itemset): ccount += 1
if d.issuperset(antecedent): acount += 1
base /= nr_transactions
p_y_given_x = ccount/acount
lift = p_y_given_x / base
if lift > minlift:
print('Rule {0} -> {1} has lift {2}'
                  .format(antecedent, consequent, lift))
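# --- Illustrative toy example (not part of the book code above) ---------------
# A minimal sketch of the lift computation, assuming a tiny hand-made set of
# transactions; the function name and the data are hypothetical.
def _toy_lift_example():
    toy = [set(t) for t in ([1, 2], [1, 2, 3], [2, 3], [1, 2, 4], [3, 4])]
    antecedent, consequent = frozenset([1]), frozenset([2])
    n = float(len(toy))
    p_consequent = sum(1 for t in toy if t.issuperset(consequent)) / n      # P(y) = 0.8
    acount = sum(1 for t in toy if t.issuperset(antecedent))                # count(x) = 3
    ccount = sum(1 for t in toy if t.issuperset(antecedent | consequent))   # count(x and y) = 3
    p_y_given_x = ccount / float(acount)                                    # P(y | x) = 1.0
    return p_y_given_x / p_consequent                                       # lift = 1.25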
|
mit
|
mjgrav2001/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                      n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
dyoung418/tensorflow
|
tensorflow/contrib/factorization/python/ops/kmeans.py
|
19
|
17291
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to convert the input points into a usable format.
Args:
features: The input points.
Returns:
If `features` is a dict of `k` features, each of which is a vector of `n`
scalars, the return value is a Tensor of shape `(n, k)` representing `n`
input points, where the items in the `k` dimension are sorted
lexicographically by `features` key. If `features` is not a dict, it is
returned unmodified.
"""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], axis=1)
return features
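# Illustrative note (not part of the original module): given a dict of two
# (n, 1) feature columns such as {'b': tb, 'a': ta}, the helper above returns
# concat([ta, tb], axis=1) of shape (n, 2), with columns ordered by key.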
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
def model_fn(self, features, mode, config):
"""Model function for the estimator.
    Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See @{tf.estimator.Estimator}.
mode: See @{tf.estimator.Estimator}.
config: See @{tf.estimator.Estimator}.
Returns:
A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_tensor_or_dict(features)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# cluster_centers_var: a Variable containing the cluster centers.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering."""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers
from `0` to `k`. It will be invoked on successive input batches
as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the
entire batch is chosen to be initial cluster centers and the
remaining centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less
than `num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
        * `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: Squared Euclidean
             distance between vectors `u` and `v`, defined as `||u - v||_2^2`,
             i.e. the sum of the squares of the element-wise differences.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as `1 - (u . v) / (||u||_2 ||v||_2)`.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See @{tf.estimator.Estimator}.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(
num_clusters, initial_clusters, distance_metric, random_seed,
use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.evaluate}. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
    Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
    this function returns the squared Euclidean distance while the corresponding
    sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(clustering_ops.CLUSTERS_VAR_NAME)
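# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal sketch, assuming a TF 1.x runtime and an in-memory numpy array of
# points of shape (n, d); the helper name `_example_kmeans_usage` is
# hypothetical.
def _example_kmeans_usage(points, num_clusters=3, num_iterations=10):
  """Illustrative only: full-batch training of KMeansClustering on `points`."""
  import tensorflow as tf  # assumed importable alongside this contrib module

  def input_fn():
    # Full-batch mode: feed the whole dataset exactly once per train() call.
    return tf.train.limit_epochs(
        tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)

  kmeans = KMeansClustering(num_clusters=num_clusters, use_mini_batch=False)
  for _ in range(num_iterations):
    kmeans.train(input_fn)
  return kmeans.cluster_centers()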
|
apache-2.0
|
ibis-project/ibis
|
ibis/backends/tests/test_timecontext.py
|
1
|
3059
|
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
from ibis.config import option_context
from .test_vectorized_udf import calc_mean, create_demean_struct_udf
GROUPBY_COL = 'month'
ORDERBY_COL = 'timestamp_col'
TARGET_COL = 'float_col'
@pytest.fixture
def context():
# These need to be tz-naive because the timestamp_col in
# the test data is tz-naive
return pd.Timestamp('20090105'), pd.Timestamp('20090111')
def filter_by_time_context(df, context):
return df[
(df['timestamp_col'] >= context[0])
& (df['timestamp_col'] < context[1])
]
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.min_spark_version('3.1')
@pytest.mark.parametrize(
'window',
[
ibis.trailing_window(ibis.interval(days=3), order_by=ORDERBY_COL),
ibis.trailing_window(
ibis.interval(days=3), order_by=ORDERBY_COL, group_by=GROUPBY_COL,
),
],
)
def test_context_adjustment_window_udf(alltypes, df, context, window):
""" This test case aims to test context adjustment of
udfs in window method.
"""
with option_context('context_adjustment.time_col', 'timestamp_col'):
expr = alltypes.mutate(v1=calc_mean(alltypes[TARGET_COL]).over(window))
result = expr.execute(timecontext=context)
expected = expr.execute()
expected = filter_by_time_context(expected, context).reset_index(
drop=True
)
tm.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
def test_context_adjustment_filter_before_window(alltypes, df, context):
with option_context('context_adjustment.time_col', 'timestamp_col'):
window = ibis.trailing_window(
ibis.interval(days=3), order_by=ORDERBY_COL
)
expr = alltypes[alltypes['bool_col']]
expr = expr.mutate(v1=expr[TARGET_COL].count().over(window))
result = expr.execute(timecontext=context)
expected = expr.execute()
expected = filter_by_time_context(expected, context)
expected = expected.reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas'])
def test_context_adjustment_multi_col_udf_non_grouped(alltypes, df, context):
with option_context('context_adjustment.time_col', 'timestamp_col'):
w = ibis.window(preceding=None, following=None)
demean_struct_udf = create_demean_struct_udf(
result_formatter=lambda v1, v2: (v1, v2)
)
result = alltypes.mutate(
demean_struct_udf(alltypes['double_col'], alltypes['int_col'])
.over(w)
.destructure()
).execute(timecontext=context)
expected = alltypes.mutate(
demean=alltypes['double_col']
- alltypes['double_col'].mean().over(w),
demean_weight=alltypes['int_col']
- alltypes['int_col'].mean().over(w),
).execute(timecontext=context)
tm.assert_frame_equal(result, expected)
|
apache-2.0
|
smartscheduling/scikit-learn-categorical-tree
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
|
bsd-3-clause
|
Vimos/scikit-learn
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
44
|
2262
|
""" test the label propagation module """
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.semi_supervised import label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelPropagation, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
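# --- Illustrative usage sketch (not part of the original tests) ---------------
# A minimal sketch of the semi-supervised convention exercised above: points
# labelled -1 are unlabelled and receive a label via transduction. The function
# name is hypothetical and is not collected by the test runner.
def _example_label_spreading_usage():
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]  # -1 marks the unlabelled point
    clf = label_propagation.LabelSpreading(kernel='rbf').fit(samples, labels)
    return clf.transduction_[2]  # predicted label for the unlabelled point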
|
bsd-3-clause
|
siou83/trading-with-python
|
lib/interactiveBrokers/histData.py
|
76
|
6472
|
'''
Created on May 8, 2013
Copyright: Jev Kuznetsov
License: BSD
Module for downloading historic data from IB
'''
import ib
import pandas as pd
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
import logger as logger
from pandas import DataFrame, Index
import os
import datetime as dt
import time
from time import sleep
from extra import timeFormat, dateFormat
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pd.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1 D',barSizeSetting='30 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
if isinstance(endDateTime,dt.datetime): # convert to string
endDateTime = endDateTime.strftime(timeFormat)
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
# def getIntradayData(self,contract, dateTuple ):
# ''' get full day data on 1-s interval
# date: a tuple of (yyyy,mm,dd)
# '''
#
# openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
# closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
#
# timeRange = pd.date_range(openTime,closeTime,freq='30min')
#
# datasets = []
#
# for t in timeRange:
# datasets.append(self.requestData(contract,t.strftime(timeFormat)))
#
# return pd.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class _HistDataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date,dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class TimeKeeper(object):
'''
    class for keeping track of previous requests, to satisfy the IB requirements
    (max 60 requests / 10 min)
    each time a request is made, a timestamp is added to a txt file in the user dir.
'''
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~')+'/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir,'requests.txt'))
# Create file if it's missing
if not os.path.exists(self.dataFile):
open(self.dataFile,'w').close()
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
''' adds a timestamp of current request'''
with open(self.dataFile,'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat)+'\n')
def nrRequests(self,timeSpan=600):
''' return number of requests in past timespan (s) '''
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile,'r') as f:
lines = f.readlines()
for line in lines:
if now-dt.datetime.strptime(line.strip(),self._timeFormat) < delta:
requests+=1
if requests==0: # erase all contents if no requests are relevant
open(self.dataFile,'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
if __name__ == '__main__':
from extra import createContract
dl = Downloader(debug=True) # historic data downloader class
contract = createContract('SPY') # create contract using defaults (STK,SMART,USD)
data = dl.requestData(contract,"20141208 16:00:00 EST") # request 30-second data bars up till now
data.to_csv('SPY.csv') # write data to csv
print 'Done'
|
bsd-3-clause
|
chengsoonong/crowdastro
|
crowdastro/import_data.py
|
1
|
34208
|
"""Imports and standardises data into crowdastro.
Matthew Alger
The Australian National University
2016
"""
import argparse
import csv
import hashlib
import logging
import os
from astropy.coordinates import SkyCoord
import astropy.io.fits
from astropy.io import ascii
import astropy.utils.exceptions
import astropy.wcs
import h5py
import numpy
import scipy.spatial.distance
import sklearn.neighbors
from .config import config
from .exceptions import CatalogueError
VERSION = '0.5.1' # Data version, not module version!
# max number of components * individual component signature size.
MAX_RADIO_SIGNATURE_LENGTH = 50
MAX_NAME_LENGTH = 50 # b
MAX_ZOONIVERSE_ID_LENGTH = 20 # b
PATCH_RADIUS = config['patch_radius'] # px
ARCMIN = 1 / 60 # deg
CANDIDATE_RADIUS = ARCMIN # deg
FITS_CONVENTION = 1
def hash_file(f):
"""Finds the MD5 hash of a file.
File must be opened in bytes mode.
"""
h = hashlib.md5()
chunk_size = 65536 # 64 KiB
for chunk in iter(lambda: f.read(chunk_size), b''):
h.update(chunk)
return h.hexdigest()
def checksum_file(filename, h):
"""Checks files hash to expected hashes.
filename: str.
h: Hex hash string to compare against.
-> True iff file matches hash.
"""
with open(filename, 'rb') as f:
h_ = hash_file(f)
return h_ == h
def prep_h5(f_h5, ir_survey):
"""Creates hierarchy in HDF5 file."""
f_h5.create_group('/atlas/cdfs')
f_h5.create_group('/atlas/elais')
f_h5.create_group('/{}/cdfs'.format(ir_survey))
f_h5.create_group('/{}/elais'.format(ir_survey))
f_h5.attrs['version'] = VERSION
f_h5.attrs['ir_survey'] = ir_survey
def import_atlas(f_h5, test=False, field='cdfs'):
"""Imports the ATLAS dataset into crowdastro, as well as associated SWIRE.
f_h5: An HDF5 file.
test: Flag to run on only 10 subjects. Default False.
"""
from . import rgz_data as data
# Fetch groups from HDF5.
cdfs = f_h5['/atlas/{}'.format(field)]
# First pass, I'll find coords, names, and Zooniverse IDs, as well as how
# many data points there are.
coords = []
names = []
zooniverse_ids = []
if (field == 'cdfs'):
# We need the ATLAS name, but we can only get it by going through the
# ATLAS catalogue and finding the nearest component.
# https://github.com/chengsoonong/crowdastro/issues/63
# Fortunately, @jbanfield has already done this, so we can just load
# that CSV and match the names.
# TODO(MatthewJA): This matches the ATLAS component ID, but maybe we
# should be using the name instead.
rgz_to_atlas = {}
with open(config['data_sources']['rgz_to_atlas']) as f:
reader = csv.DictReader(f)
for row in reader:
rgz_to_atlas[row['ID_RGZ']] = row['ID']
all_subjects = data.get_all_subjects(survey='atlas', field=field)
if test:
all_subjects = all_subjects.limit(10)
for subject in all_subjects:
ra, dec = subject['coords']
zooniverse_id = subject['zooniverse_id']
rgz_source_id = subject['metadata']['source']
if rgz_source_id not in rgz_to_atlas:
logging.debug('Skipping %s; no matching ATLAS component.',
zooniverse_id)
continue
name = rgz_to_atlas[rgz_source_id]
# Store the results.
coords.append((ra, dec))
names.append(name)
zooniverse_ids.append(zooniverse_id)
elif (field == 'elais'):
atlascatalogue = ascii.read(config['data_sources']['atlas_catalogue'])
ras, decs = atlascatalogue['RA_deg'], atlascatalogue['Dec_deg']
e_ids = atlascatalogue['ID']
fields = atlascatalogue['field']
# Store the results.
for ra, dec, e_id, field_ in zip(ras, decs, e_ids, fields):
if (field_ == 'ELAIS-S1'):
coords.append((ra, dec))
names.append(e_id)
zooniverse_ids.append(e_id)
n_cdfs = len(names)
# Sort the data by Zooniverse ID.
coords_to_zooniverse_ids = dict(zip(coords, zooniverse_ids))
names_to_zooniverse_ids = dict(zip(names, zooniverse_ids))
coords.sort(key=coords_to_zooniverse_ids.get)
names.sort(key=names_to_zooniverse_ids.get)
zooniverse_ids.sort()
# Begin to store the data. We will have two tables: one for numeric data,
# and one for strings. We will have to preallocate the numeric table so that
# we aren't storing huge amounts of image data in memory.
# Strings.
dtype = [('zooniverse_id', '<S{}'.format(MAX_ZOONIVERSE_ID_LENGTH)),
('name', '<S{}'.format(MAX_NAME_LENGTH))]
string_data = numpy.array(list(zip(zooniverse_ids, names)), dtype=dtype)
cdfs.create_dataset('string', data=string_data, dtype=dtype)
# Numeric.
image_size = (config['surveys']['atlas']['fits_width'] *
config['surveys']['atlas']['fits_height'])
# RA, DEC, radio, (distance to SWIRE object added later)
dim = (n_cdfs, 1 + 1 + image_size)
numeric = cdfs.create_dataset('_numeric', shape=dim, dtype='float32')
# Load image patches and store numeric data.
with astropy.io.fits.open(
config['data_sources']['atlas_{}_image'.format(field)],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
pix_coords = wcs.all_world2pix(coords, FITS_CONVENTION)
assert pix_coords.shape[1] == 2
logging.debug('Fetching %d ATLAS images.', len(pix_coords))
for index, (x, y) in enumerate(pix_coords):
radio = atlas_image[0].data[
0, 0, # stokes, freq
int(y) - config['surveys']['atlas']['fits_height'] // 2:
int(y) + config['surveys']['atlas']['fits_height'] // 2,
int(x) - config['surveys']['atlas']['fits_width'] // 2:
int(x) + config['surveys']['atlas']['fits_width'] // 2]
numeric[index, 0] = coords[index][0]
numeric[index, 1] = coords[index][1]
numeric[index, 2:2 + image_size] = radio.reshape(-1)
logging.debug('ATLAS imported.')
def remove_nulls(n):
"""Swaps nulls with zeros."""
if n == 'null':
return 0
return n
def import_swire(f_h5, field='cdfs'):
"""Imports the SWIRE dataset into crowdastro.
f_h5: An HDF5 file.
field: 'cdfs' or 'elais'.
"""
names = []
rows = []
logging.debug('Reading SWIRE catalogue.')
with open(
config['data_sources']['swire_{}_catalogue'.format(field)]
) as f_tbl:
# This isn't a valid ASCII table, so Astropy can't handle it. This means
# we have to parse it manually.
if field == 'cdfs':
for _ in range(5): # Skip the first five lines.
next(f_tbl)
# Get the column names.
columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
assert len(columns) == 156
for _ in range(3): # Skip the next three lines.
next(f_tbl)
for row in f_tbl:
row = row.strip().split()
assert len(row) == 156
row = dict(zip(columns, row))
name = row['object']
ra = float(row['ra'])
dec = float(row['dec'])
flux_ap2_36 = float(remove_nulls(row['flux_ap2_36']))
flux_ap2_45 = float(remove_nulls(row['flux_ap2_45']))
flux_ap2_58 = float(remove_nulls(row['flux_ap2_58']))
flux_ap2_80 = float(remove_nulls(row['flux_ap2_80']))
flux_ap2_24 = float(remove_nulls(row['flux_ap2_24']))
stell_36 = float(remove_nulls(row['stell_36']))
# Extra -1 is so we can store nearest distance later.
rows.append((ra, dec, flux_ap2_36, flux_ap2_45, flux_ap2_58,
flux_ap2_80, flux_ap2_24, stell_36, -1))
names.append(name)
elif field == 'elais':
for _ in range(121): # Skip the first 121 lines.
next(f_tbl)
# Get the column names.
columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
assert len(columns) == 54
for _ in range(3): # Skip the next three lines.
next(f_tbl)
for row in f_tbl:
row = row.strip().split()
assert len(row) == 54
row = dict(zip(columns, row))
name = row['object']
ra = float(row['ra'])
dec = float(row['dec'])
flux_ap2_36 = float(remove_nulls(row['flux_ap2_36']))
flux_ap2_45 = float(remove_nulls(row['flux_ap2_45']))
flux_ap2_58 = float(remove_nulls(row['flux_ap2_58']))
flux_ap2_80 = float(remove_nulls(row['flux_ap2_80']))
flux_ap2_24 = float(remove_nulls(row['flux_ap2_24']))
stell_36 = float(remove_nulls(row['stell_36']))
# Extra -1 is so we can store nearest distance later.
rows.append((ra, dec, flux_ap2_36, flux_ap2_45, flux_ap2_58,
flux_ap2_80, flux_ap2_24, stell_36, -1))
names.append(name)
logging.debug('Found %d SWIRE objects.', len(names))
# Sort by name.
rows_to_names = dict(zip(rows, names))
rows.sort(key=rows_to_names.get)
names.sort()
names = numpy.array(names, dtype='<S{}'.format(MAX_NAME_LENGTH))
rows = numpy.array(rows)
# Filter on distance - only include image data for SWIRE objects within a
# given radius of an ATLAS object. Otherwise, there's way too much data to
# store.
swire_positions = rows[:, :2]
atlas_positions = f_h5['/atlas/{}/_numeric'.format(field)][:, :2]
logging.debug('Computing SWIRE k-d tree.')
swire_tree = sklearn.neighbors.KDTree(swire_positions, metric='euclidean')
indices = numpy.concatenate(
swire_tree.query_radius(atlas_positions, CANDIDATE_RADIUS))
indices = numpy.unique(indices)
logging.debug('Found %d SWIRE objects near ATLAS objects.', len(indices))
names = names[indices]
rows = rows[indices]
swire_positions = swire_positions[indices]
# Get distances.
logging.debug('Finding ATLAS-SWIRE object distances.')
distances = scipy.spatial.distance.cdist(atlas_positions, swire_positions,
'euclidean')
assert distances.shape[0] == atlas_positions.shape[0]
assert distances.shape[1] == swire_positions.shape[0]
logging.debug('Done finding distances.')
# Write numeric data to HDF5.
rows[:, 8] = distances.min(axis=0)
atlas_numeric = f_h5['/atlas/{}/_numeric'.format(field)]
f_h5['/atlas/{}'.format(field)].create_dataset(
'numeric', dtype='float32',
shape=(atlas_numeric.shape[0],
atlas_numeric.shape[1] + len(indices)))
f_h5['/atlas/{}/numeric'.format(field)][
:, :atlas_numeric.shape[1]] = atlas_numeric
f_h5['/atlas/{}/numeric'.format(field)][
:, atlas_numeric.shape[1]:] = distances
del f_h5['/atlas/{}/_numeric'.format(field)]
image_size = (PATCH_RADIUS * 2) ** 2
dim = (rows.shape[0], rows.shape[1] + image_size)
numeric = f_h5['/swire/{}'.format(field)].create_dataset(
'numeric', shape=dim, dtype='float32')
numeric[:, :rows.shape[1]] = rows
f_h5['/swire/{}'.format(field)].create_dataset('string', data=names)
# Load and store radio images.
logging.debug('Importing radio patches.')
with astropy.io.fits.open(
config['data_sources']['atlas_{}_image'.format(field)],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
pix_coords = wcs.all_world2pix(swire_positions, FITS_CONVENTION)
assert pix_coords.shape[1] == 2
assert pix_coords.shape[0] == len(indices)
logging.debug('Fetching %d ATLAS patches.', len(indices))
for index, (x, y) in enumerate(pix_coords):
radio = atlas_image[0].data[
0, 0, # stokes, freq
int(y) - PATCH_RADIUS:
int(y) + PATCH_RADIUS,
int(x) - PATCH_RADIUS:
int(x) + PATCH_RADIUS]
numeric[index, -image_size:] = radio.reshape(-1)
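# A minimal standalone sketch of the radius-filtering pattern used above (and in
# import_wise below): build a k-d tree over candidate positions, query it around
# each reference position, and keep the union of matches. The positions, radius
# and helper name here are invented for illustration and are not part of the
# crowdastro pipeline.
def _radius_filter_sketch():
    candidates = numpy.array([[0.0, 0.0], [0.1, 0.1], [5.0, 5.0]])
    references = numpy.array([[0.05, 0.05]])
    tree = sklearn.neighbors.KDTree(candidates, metric='euclidean')
    # query_radius returns one index array per reference position...
    matches = tree.query_radius(references, 0.2)
    # ...so concatenate and deduplicate to get every candidate near any reference.
    return numpy.unique(numpy.concatenate(matches))  # -> array([0, 1])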
def import_wise(f_h5, field='cdfs'):
"""Imports the WISE dataset into crowdastro.
f_h5: An HDF5 file.
field: 'cdfs' or 'elais'.
"""
names = []
rows = []
logging.debug('Reading WISE catalogue.')
with open(
config['data_sources']['wise_{}_catalogue'.format(field)]) as f_tbl:
# This isn't a valid ASCII table, so Astropy can't handle it. This means
# we have to parse it manually.
for _ in range(105): # Skip the first 105 lines.
next(f_tbl)
# Get the column names.
columns = [c.strip() for c in next(f_tbl).strip().split('|')][1:-1]
assert len(columns) == 45
for _ in range(3): # Skip the next three lines.
next(f_tbl)
for row in f_tbl:
row = row.strip().split()
assert len(row) == 45
row = dict(zip(columns, row))
name = row['designation']
ra = float(row['ra'])
dec = float(row['dec'])
w1mpro = float(remove_nulls(row['w1mpro']))
w2mpro = float(remove_nulls(row['w2mpro']))
w3mpro = float(remove_nulls(row['w3mpro']))
w4mpro = float(remove_nulls(row['w4mpro']))
# Extra -1 is so we can store nearest distance later.
rows.append((ra, dec, w1mpro, w2mpro, w3mpro, w4mpro, -1))
names.append(name)
logging.debug('Found %d WISE objects.', len(names))
# Sort by name.
rows_to_names = dict(zip(rows, names))
rows.sort(key=rows_to_names.get)
names.sort()
names = numpy.array(names, dtype='<S{}'.format(MAX_NAME_LENGTH))
rows = numpy.array(rows)
# Filter on distance - only include image data for WISE objects within a
# given radius of an ATLAS object. Otherwise, there's way too much data to
# store.
wise_positions = rows[:, :2]
atlas_positions = f_h5['/atlas/{}/_numeric'.format(field)][:, :2]
logging.debug('Computing WISE k-d tree.')
wise_tree = sklearn.neighbors.KDTree(wise_positions, metric='euclidean')
indices = numpy.concatenate(
wise_tree.query_radius(atlas_positions, CANDIDATE_RADIUS))
indices = numpy.unique(indices)
logging.debug('Found %d WISE objects near ATLAS objects.', len(indices))
names = names[indices]
rows = rows[indices]
wise_positions = wise_positions[indices]
# Get distances.
logging.debug('Finding ATLAS-WISE object distances.')
distances = scipy.spatial.distance.cdist(atlas_positions, wise_positions,
'euclidean')
assert distances.shape[0] == atlas_positions.shape[0]
assert distances.shape[1] == wise_positions.shape[0]
logging.debug('Done finding distances.')
# Write numeric data to HDF5.
rows[:, 6] = distances.min(axis=0)
atlas_numeric = f_h5['/atlas/{}/_numeric'.format(field)]
f_h5['/atlas/{}'.format(field)].create_dataset(
'numeric', dtype='float32',
shape=(atlas_numeric.shape[0],
atlas_numeric.shape[1] + len(indices)))
numeric_f = f_h5['/atlas/{}/numeric'.format(field)]
numeric_f[:, :atlas_numeric.shape[1]] = atlas_numeric
numeric_f[:, atlas_numeric.shape[1]:] = distances
del f_h5['/atlas/{}/_numeric'.format(field)]
image_size = (PATCH_RADIUS * 2) ** 2
dim = (rows.shape[0], rows.shape[1] + image_size)
numeric = f_h5['/wise/{}'.format(field)].create_dataset(
'numeric', shape=dim, dtype='float32')
numeric[:, :rows.shape[1]] = rows
f_h5['/wise/{}'.format(field)].create_dataset('string', data=names)
# Load and store radio images.
logging.debug('Importing radio patches.')
with astropy.io.fits.open(
config['data_sources']['atlas_{}_image'.format(field)],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
pix_coords = wcs.all_world2pix(wise_positions, FITS_CONVENTION)
assert pix_coords.shape[1] == 2
assert pix_coords.shape[0] == len(indices)
logging.debug('Fetching %d ATLAS patches.', len(indices))
for index, (x, y) in enumerate(pix_coords):
radio = atlas_image[0].data[
0, 0, # stokes, freq
int(y) - PATCH_RADIUS:
int(y) + PATCH_RADIUS,
int(x) - PATCH_RADIUS:
int(x) + PATCH_RADIUS]
numeric[index, -image_size:] = radio.reshape(-1)
def import_norris(f_h5):
"""Imports the Norris et al. (2006) labels.
f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
"""
ir_survey = f_h5.attrs['ir_survey']
ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
ir_tree = sklearn.neighbors.KDTree(ir_positions)
norris_dat = astropy.io.ascii.read(config['data_sources']['norris_coords'])
norris_swire = norris_dat['SWIRE']
norris_coords = []
for s in norris_swire:
s = s.strip()
if len(s) < 19:
continue
# e.g. J032931.44-281722.0
ra_hr = s[1:3]
ra_min = s[3:5]
ra_sec = s[5:10]
dec_sgn = s[10]
dec_deg = s[11:13]
dec_min = s[13:15]
dec_sec = s[15:19]
ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
logging.debug('Reading Norris coordinate: {}; {}'.format(ra, dec))
coord = SkyCoord(ra=ra, dec=dec,
                         unit=('hourangle', 'deg'))
norris_coords.append(coord)
norris_labels = numpy.zeros((ir_positions.shape[0],))
for skycoord in norris_coords:
# Find a neighbour.
ra = skycoord.ra.degree
dec = skycoord.dec.degree
((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
if dist < config['surveys'][ir_survey]['distance_cutoff']:
norris_labels[ir] = 1
f_h5.create_dataset('/{}/cdfs/norris_labels'.format(ir_survey),
data=norris_labels)
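# A small sketch of the designation parsing above, using the example designation
# from the comment ('J032931.44-281722.0'). Only the function name is ours; the
# slicing offsets mirror the loop in import_norris.
def _parse_norris_designation_sketch():
    s = 'J032931.44-281722.0'
    ra = '{} {} {}'.format(s[1:3], s[3:5], s[5:10])                 # '03 29 31.44'
    dec = '{}{} {} {}'.format(s[10], s[11:13], s[13:15], s[15:19])  # '-28 17 22.0'
    return SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))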
def import_fan(f_h5):
"""Imports the Fan et al. (2015) labels.
f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
"""
ir_survey = f_h5.attrs['ir_survey']
ir_names = f_h5['/{}/cdfs/string'.format(ir_survey)]
ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
ir_tree = sklearn.neighbors.KDTree(ir_positions)
fan_coords = []
with open(config['data_sources']['fan_swire'], 'r') as fan_dat:
for row in csv.DictReader(fan_dat):
ra_hr = row['swire'][8:10]
ra_min = row['swire'][10:12]
ra_sec = row['swire'][12:17]
dec_sgn = row['swire'][17]
dec_deg = row['swire'][18:20]
dec_min = row['swire'][20:22]
dec_sec = row['swire'][22:26]
ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
fan_coords.append((ra, dec))
fan_labels = numpy.zeros((ir_positions.shape[0],))
for ra, dec in fan_coords:
# Find a neighbour.
skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
ra = skycoord.ra.degree
dec = skycoord.dec.degree
((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
if dist < config['surveys'][ir_survey]['distance_cutoff']:
fan_labels[ir] = 1
f_h5.create_dataset('/{}/cdfs/fan_labels'.format(ir_survey),
data=fan_labels)
def contains(bbox, point):
"""Checks if point is within bbox.
bbox: [[x0, x1], [y0, y1]]
point: [x, y]
-> bool
"""
return (bbox[0][0] <= point[0] <= bbox[0][1] and
bbox[1][0] <= point[1] <= bbox[1][1])
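# A quick illustration of the bbox convention (values invented):
#     contains([[0, 2], [0, 1]], [1.5, 0.5])  # -> True
#     contains([[0, 2], [0, 1]], [3.0, 0.5])  # -> False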
bbox_cache_ = {} # Should help speed up ATLAS membership checking.
def make_radio_combination_signature(radio_annotation, wcs, atlas_positions,
subject, pix_offset):
"""Generates a unique signature for a radio annotation.
radio_annotation: 'radio' dictionary from a classification.
wcs: World coordinate system associated with the ATLAS image.
atlas_positions: [[RA, DEC]] NumPy array.
subject: RGZ subject dict.
pix_offset: (x, y) pixel position of this radio subject on the ATLAS image.
    -> str: a sorted, semicolon-joined string of crowdastro ATLAS indices
       (immutable and hashable).
"""
from . import rgz_data as data
# TODO(MatthewJA): This only works on ATLAS. Generalise.
# My choice of immutable object will be stringified crowdastro ATLAS
# indices.
zooniverse_id = subject['zooniverse_id']
subject_fits = data.get_radio_fits(subject)
subject_wcs = astropy.wcs.WCS(subject_fits.header)
atlas_ids = []
x_offset, y_offset = pix_offset
for c in radio_annotation.values():
# Note that the x scale is not the same as the IR scale, but the scale
# factor is included in the annotation, so I have multiplied this out
# here for consistency.
scale_width = c.get('scale_width', '')
scale_height = c.get('scale_height', '')
if scale_width:
scale_width = float(scale_width)
else:
# Sometimes, there's no scale, so I've included a default scale.
scale_width = config['surveys']['atlas']['scale_width']
if scale_height:
scale_height = float(scale_height)
else:
scale_height = config['surveys']['atlas']['scale_height']
# These numbers are in terms of the PNG images, so I need to multiply by
# the click-to-fits ratio.
scale_width *= config['surveys']['atlas']['click_to_fits_x']
scale_height *= config['surveys']['atlas']['click_to_fits_y']
subject_bbox = [
[
float(c['xmin']) * scale_width,
float(c['xmax']) * scale_width,
],
[
float(c['ymin']) * scale_height,
float(c['ymax']) * scale_height,
],
]
# ...and by the mosaic ratio. There's probably double-up here, but this
# makes more sense.
scale_width *= config['surveys']['atlas']['mosaic_scale_x']
scale_height *= config['surveys']['atlas']['mosaic_scale_y']
# Get the bounding box of the radio source in pixels.
# Format: [xs, ys]
bbox = [
[
float(c['xmin']) * scale_width,
float(c['xmax']) * scale_width,
],
[
float(c['ymin']) * scale_height,
float(c['ymax']) * scale_height,
],
]
assert bbox[0][0] < bbox[0][1]
assert bbox[1][0] < bbox[1][1]
# Convert the bounding box into RA/DEC.
bbox = wcs.all_pix2world(bbox[0] + x_offset, bbox[1] + y_offset,
FITS_CONVENTION)
subject_bbox = subject_wcs.all_pix2world(subject_bbox[0],
subject_bbox[1], FITS_CONVENTION)
# TODO(MatthewJA): Remove (or disable) this sanity check.
# The bbox is backwards along the x-axis for some reason.
bbox[0] = bbox[0][::-1]
assert bbox[0][0] < bbox[0][1]
assert bbox[1][0] < bbox[1][1]
bbox = numpy.array(bbox)
# What is this radio source called? Check if we have an object in the
# bounding box. We'll cache these results because there is a lot of
# overlap.
cache_key = tuple(tuple(b) for b in bbox)
if cache_key in bbox_cache_:
index = bbox_cache_[cache_key]
else:
x_gt_min = atlas_positions[:, 0] >= bbox[0, 0]
x_lt_max = atlas_positions[:, 0] <= bbox[0, 1]
y_gt_min = atlas_positions[:, 1] >= bbox[1, 0]
y_lt_max = atlas_positions[:, 1] <= bbox[1, 1]
within = numpy.all([x_gt_min, x_lt_max, y_gt_min, y_lt_max], axis=0)
indices = numpy.where(within)[0]
if len(indices) == 0:
logging.debug('Skipping radio source not in catalogue for '
'%s', zooniverse_id)
continue
else:
if len(indices) > 1:
logging.debug('Found multiple (%d) ATLAS matches '
'for %s', len(indices), zooniverse_id)
index = indices[0]
bbox_cache_[cache_key] = index
atlas_ids.append(str(index))
atlas_ids.sort()
if not atlas_ids:
raise CatalogueError('No catalogued radio sources.')
return ';'.join(atlas_ids)
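# Illustrative only: for a subject whose radio components match crowdastro ATLAS
# indices 12 and 74, the returned signature would be the string '12;74'.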
def parse_classification(classification, subject, atlas_positions, wcs,
pix_offset):
"""Converts a raw RGZ classification into a classification dict.
Scales all positions and flips y axis of clicks.
classification: RGZ classification dict.
subject: Associated RGZ subject dict.
atlas_positions: [[RA, DEC]] NumPy array.
wcs: World coordinate system of the ATLAS image.
pix_offset: (x, y) pixel position of this radio subject on the ATLAS image.
    -> dict mapping radio signature to a list of corresponding IR host (RA, DEC)
       locations.
"""
result = {}
n_invalid = 0
for annotation in classification['annotations']:
if 'radio' not in annotation:
# This is a metadata annotation and we can ignore it.
continue
if annotation['radio'] == 'No Contours':
# I'm not sure how this occurs. I'm going to ignore it.
continue
try:
radio_signature = make_radio_combination_signature(
annotation['radio'], wcs, atlas_positions,
subject, pix_offset)
except CatalogueError:
# Ignore invalid annotations.
n_invalid += 1
logging.debug('Ignoring invalid annotation for %s.',
subject['zooniverse_id'])
continue
ir_locations = []
if annotation['ir'] != 'No Sources':
for ir_click in annotation['ir']:
ir_x = float(annotation['ir'][ir_click]['x'])
ir_y = float(annotation['ir'][ir_click]['y'])
# Rescale to a consistent size.
ir_x *= config['surveys']['atlas']['click_to_fits_x']
ir_y *= config['surveys']['atlas']['click_to_fits_y']
# Ignore out-of-range data.
if not 0 <= ir_x <= config['surveys']['atlas']['fits_width']:
n_invalid += 1
continue
if not 0 <= ir_y <= config['surveys']['atlas']['fits_height']:
n_invalid += 1
continue
# Flip the y axis to match other data conventions.
ir_y = config['surveys']['atlas']['fits_height'] - ir_y
# Rescale to match the mosaic WCS.
ir_x *= config['surveys']['atlas']['mosaic_scale_x']
ir_y *= config['surveys']['atlas']['mosaic_scale_y']
# Move to the reference location of the radio subject.
ir_x += pix_offset[0]
ir_y += pix_offset[1]
# Convert the location into RA/DEC.
(ir_x,), (ir_y,) = wcs.wcs_pix2world([ir_x], [ir_y], 1)
ir_location = (ir_x, ir_y)
ir_locations.append(ir_location)
result[radio_signature] = ir_locations
if n_invalid:
logging.debug('%d invalid annotations for %s.', n_invalid,
subject['zooniverse_id'])
return result
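# For intuition, a made-up example of the returned structure: each semicolon-joined
# radio signature maps to the volunteer's IR host locations in RA/DEC, and the
# list is empty when 'No Sources' was selected, e.g.
#     {'4': [(52.801, -28.114)], '12;31': []}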
def import_classifications(f_h5, test=False):
"""Imports Radio Galaxy Zoo classifications into crowdastro.
f_h5: An HDF5 file.
test: Flag to run on only 10 subjects. Default False.
"""
# TODO(MatthewJA): This only works for ATLAS/CDFS. Generalise.
from . import rgz_data as data
atlas_positions = f_h5['/atlas/cdfs/numeric'][:, :2]
atlas_ids = f_h5['/atlas/cdfs/string']['zooniverse_id']
classification_positions = []
classification_combinations = []
classification_usernames = []
with astropy.io.fits.open(
# RGZ only has cdfs classifications
config['data_sources']['atlas_cdfs_image'],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
for obj_index, atlas_id in enumerate(atlas_ids):
subject = data.get_subject(atlas_id.decode('ascii'))
assert subject['zooniverse_id'] == atlas_ids[obj_index].decode('ascii')
classifications = data.get_subject_classifications(subject)
offset, = wcs.all_world2pix([subject['coords']], FITS_CONVENTION)
# The coords are of the middle of the subject.
offset[0] -= (config['surveys']['atlas']['fits_width'] *
config['surveys']['atlas']['mosaic_scale_x'] // 2)
offset[1] -= (config['surveys']['atlas']['fits_height'] *
config['surveys']['atlas']['mosaic_scale_y'] // 2)
for c_index, classification in enumerate(classifications):
user_name = classification.get('user_name', '').encode(
'ascii', errors='ignore')
# Usernames actually don't have an upper length limit on RGZ(?!) so
# I'll cap everything at 50 characters for my own sanity.
if len(user_name) > 50:
user_name = user_name[:50]
classification = parse_classification(classification, subject,
atlas_positions, wcs, offset)
full_radio = '|'.join(classification.keys())
for radio, locations in classification.items():
if not locations:
locations = [(None, None)]
for click_index, location in enumerate(locations):
# Check whether the click index is 0 to maintain the
# assumption that we only need the first click.
pos_row = (obj_index, location[0], location[1],
click_index == 0)
com_row = (obj_index, full_radio, radio)
# A little redundancy here with the index, but we can assert
# that they are the same later to check integrity.
classification_positions.append(pos_row)
classification_combinations.append(com_row)
classification_usernames.append(user_name)
combinations_dtype = [('index', 'int'),
('full_signature', '<S{}'.format(
MAX_RADIO_SIGNATURE_LENGTH)),
('signature', '<S{}'.format(
MAX_RADIO_SIGNATURE_LENGTH))]
classification_positions = numpy.array(classification_positions,
dtype=float)
classification_combinations = numpy.array(classification_combinations,
dtype=combinations_dtype)
f_h5['/atlas/cdfs/'].create_dataset('classification_positions',
data=classification_positions,
dtype=float)
f_h5['/atlas/cdfs/'].create_dataset('classification_usernames',
data=classification_usernames,
dtype='<S50')
f_h5['/atlas/cdfs/'].create_dataset('classification_combinations',
data=classification_combinations,
dtype=combinations_dtype)
def _populate_parser(parser):
parser.description = 'Imports and standardises data into crowdastro.'
parser.add_argument('--h5', default='data/crowdastro.h5',
help='HDF5 output file')
parser.add_argument('--test', action='store_true', default=False,
help='Run with a small number of subjects',)
parser.add_argument('--ir', choices={'swire', 'wise'},
default='swire', help='which infrared survey to use')
def check_raw_data():
"""Validates existence and correctness of raw data files."""
for source, filename in config['data_sources'].items():
if source == 'radio_galaxy_zoo_db':
# Skip the MongoDB name.
continue
if not os.path.exists(filename):
logging.error(
'{} expected at {} but not found'.format(source, filename))
if source in config['data_checksums']:
valid = checksum_file(filename, config['data_checksums'][source])
if not valid:
logging.error('{} has incorrect hash'.format(filename))
else:
logging.debug('{} has correct hash'.format(filename))
def _main(args):
check_raw_data()
with h5py.File(args.h5, 'w') as f_h5:
prep_h5(f_h5, args.ir)
import_atlas(f_h5, test=args.test, field='cdfs')
import_atlas(f_h5, test=args.test, field='elais')
if args.ir == 'swire':
import_swire(f_h5, field='cdfs')
import_swire(f_h5, field='elais')
elif args.ir == 'wise':
import_wise(f_h5, field='cdfs')
import_wise(f_h5, field='elais')
import_norris(f_h5)
import_fan(f_h5)
import_classifications(f_h5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
_populate_parser(parser)
args = parser.parse_args()
_main(args)
| mit | public-ink/public-ink | server/appengine/lib/matplotlib/tri/triplot.py | 21 | 3124 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
    - the lines plotted for triangle edges
    - the markers plotted for triangle nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
    #         as it considerably speeds up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
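# A minimal usage sketch (not part of this module); the points and triangles are
# invented purely to show the calling conventions documented above:
#
#     import matplotlib.pyplot as plt
#     x = [0.0, 1.0, 0.0, 1.0]
#     y = [0.0, 0.0, 1.0, 1.0]
#     triangles = [[0, 1, 2], [1, 3, 2]]
#     fig, ax = plt.subplots()
#     ax.triplot(x, y, triangles, 'go-')  # equivalently: triplot(ax, x, y, triangles, 'go-')
#     plt.show()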
| gpl-3.0 | zhenv5/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 |
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor using 299 boosts (300 decision trees in total) is compared with a
single decision tree regressor. As the number of boosts is increased, the
regressor can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
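# A possible follow-up (not part of the original example): AdaBoostRegressor also
# exposes staged_predict, which makes it easy to see how the fit tightens as more
# boosts are used, e.g.
#
#     staged_mse = [np.mean((y - y_staged) ** 2)
#                   for y_staged in regr_2.staged_predict(X)]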
| bsd-3-clause | tcm129/trading-with-python | lib/extra.py | 77 | 2540 |
'''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
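# Illustrative usage of ProgressBar (not part of the original module):
#     pb = ProgressBar(100)
#     for i in range(100):
#         pb.animate(i)  # reprints the bar in-place and advances it by one step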
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
| bsd-3-clause | tensorflow/lattice | examples/keras_functional_uci_heart.py | 1 | 13114 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Example usage of TFL within Keras Functional API.
This example builds and trains a calibrated lattice model for the UCI heart
dataset.
"Calibrated lattice" is a commonly used architecture for datasets where number
of input features does not exceed ~15.
"Calibrated lattice" assumes every feature is transformed by a PWLCalibration
or CategoricalCalibration layer before the calibration results are nonlinearly
fused within a Lattice layer.
The TFL package does not have any layers dedicated to processing sparse
features, but thanks to plug-and-play compatibility with other Keras layers we
can take advantage of a standard Keras embedding to handle them. The UCI Heart
dataset does not have any sparse features, so for this example we replaced the
PWLCalibration layer for the feature 'age' with an Embedding layer in order to
demonstrate such compatibility, as well as the advantage of monotonicity
constraints for semantically meaningful features.
Generally when you manually combine TFL layers you should keep track of:
1) Ensuring that inputs to TFL layers are within expected range.
- Input range for PWLCalibration layer is defined by smallest and largest of
provided keypoints.
- Input range for Lattice layer is [0.0, lattice_sizes[d] - 1.0] for any
dimension d.
  TFL layers can constrain their output to be within a desired range. When
  feeding the output of other layers into TFL layers, you might want to ensure
  that something like a sigmoid is used to constrain their output range.
2) Properly configure monotonicity. If your calibration layer is monotonic, then
  the corresponding dimension of the lattice layer should also be monotonic.
This example uses the Keras functional API for model construction. For an
example of sequential models with TFL layers see keras_sequential_uci_heart.py.
In order to see how better generalization can be achieved with a properly
constrained PWLCalibration layer compared to a vanilla embedding layer, compare
the training and validation losses of this model with the one defined in
keras_sequential_uci_heart.py.
Note that the specifics of layer configurations are for demonstration purposes
and might not result in optimal performance.
Example usage:
keras_functional_uci_heart
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import tensorflow_lattice as tfl
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_epochs', 200, 'Number of training epochs.')
def main(_):
# UCI Statlog (Heart) dataset.
csv_file = tf.keras.utils.get_file(
'heart.csv',
'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
training_data_df = pd.read_csv(csv_file).sample(
frac=1.0, random_state=41).reset_index(drop=True)
# Feature columns.
# 0 age
# 1 sex
# 2 cp chest pain type (4 values)
# 3 trestbps resting blood pressure
# 4 chol serum cholestoral in mg/dl
# 5 fbs fasting blood sugar > 120 mg/dl
# 6 restecg resting electrocardiographic results (values 0,1,2)
# 7 thalach maximum heart rate achieved
# 8 exang exercise induced angina
# 9 oldpeak ST depression induced by exercise relative to rest
# 10 slope the slope of the peak exercise ST segment
# 11 ca number of major vessels (0-3) colored by flourosopy
# 12 thal 3 = normal; 6 = fixed defect; 7 = reversable defect
# Example slice of training data:
# age sex cp trestbps chol fbs restecg thalach exang oldpeak
# 0 63 1 1 145 233 1 2 150 0 2.3
# 1 67 1 4 160 286 0 2 108 1 1.5
# 2 67 1 4 120 229 0 2 129 1 2.6
# 3 37 1 3 130 250 0 0 187 0 3.5
# 4 41 0 2 130 204 0 2 172 0 1.4
# 5 56 1 2 120 236 0 0 178 0 0.8
# 6 62 0 4 140 268 0 2 160 0 3.6
# 7 57 0 4 120 354 0 0 163 1 0.6
# 8 63 1 4 130 254 0 2 147 0 1.4
# 9 53 1 4 140 203 1 2 155 1 3.1
model_inputs = []
lattice_inputs = []
# We are going to have 2-d embedding as one of lattice inputs.
lattice_sizes_for_embedding = [2, 3]
lattice_sizes = lattice_sizes_for_embedding + [2, 2, 3, 3, 2, 2]
# ############### age ###############
age_input = keras.layers.Input(shape=[1])
model_inputs.append(age_input)
age_embedding = keras.layers.Embedding(
input_dim=10,
output_dim=len(lattice_sizes_for_embedding),
embeddings_initializer=keras.initializers.RandomNormal(seed=1))(
age_input)
# Flatten to get rid of redundant tensor dimension created by embedding layer.
age_embedding = keras.layers.Flatten()(age_embedding)
# Lattice expects input data for lattice dimension d to be within
# [0, lattice_sizes[d]-1.0]. Apply sigmoid and multiply it by input range to
# ensure that lattice inputs are within expected range.
embedding_lattice_input_range = tf.constant(
[size - 1.0 for size in lattice_sizes_for_embedding],
# Insert dimension of size 1 in front to ensure that batch dimension
# will not collapse as result of multiplication.
shape=(1, 2))
age_ranged = keras.layers.multiply(
[keras.activations.sigmoid(age_embedding), embedding_lattice_input_range])
lattice_inputs.append(age_ranged)
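  # Worked example of the scaling above (ours, not from the original comment):
  # with lattice_sizes_for_embedding = [2, 3] the range constant is [1.0, 2.0],
  # so a raw embedding output of 0.0 becomes sigmoid(0.0) = 0.5 and is then
  # scaled to [0.5, 1.0], safely inside the two lattice dimensions' input ranges.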
# ############### sex ###############
# For boolean features simply specify CategoricalCalibration layer with 2
# buckets.
sex_input = keras.layers.Input(shape=[1])
model_inputs.append(sex_input)
sex_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[2] - 1.0,
# Initializes all outputs to (output_min + output_max) / 2.0.
kernel_initializer='constant',
)(
sex_input)
lattice_inputs.append(sex_calibrator)
# ############### cp ###############
cp_input = keras.layers.Input(shape=[1])
model_inputs.append(cp_input)
cp_calibrator = tfl.layers.PWLCalibration(
# Here instead of specifying dtype of layer we convert keypoints into
# np.float32.
input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),
output_min=0.0,
output_max=lattice_sizes[3] - 1.0,
monotonicity='increasing',
# You can specify TFL regularizers as tuple ('regularizer name', l1, l2).
kernel_regularizer=('hessian', 0.0, 1e-4))(
cp_input)
lattice_inputs.append(cp_calibrator)
# ############### trestbps ###############
trestbps_input = keras.layers.Input(shape=[1])
model_inputs.append(trestbps_input)
trestbps_calibrator = tfl.layers.PWLCalibration(
      # As an alternative to uniform keypoints, you might want to use quantiles
      # as keypoints.
input_keypoints=np.quantile(training_data_df['trestbps'],
np.linspace(0.0, 1.0, num=5)),
dtype=tf.float32,
      # Together with quantile keypoints you might want to initialize the
      # piecewise linear function with 'equal_slopes' so that the output of the
      # layer preserves the original distribution right after initialization.
kernel_initializer='equal_slopes',
output_min=0.0,
output_max=lattice_sizes[4] - 1.0,
# You might consider clamping extreme inputs of the calibrator to output
# bounds.
clamp_min=True,
clamp_max=True,
monotonicity='increasing',
)(
trestbps_input)
lattice_inputs.append(trestbps_calibrator)
# ############### chol ###############
chol_input = keras.layers.Input(shape=[1])
model_inputs.append(chol_input)
chol_calibrator = tfl.layers.PWLCalibration(
# Explicit input keypoint initialization.
input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
output_min=0.0,
output_max=lattice_sizes[5] - 1.0,
# Monotonicity of calibrator can be decreasing. Note that corresponding
# lattice dimension must have INCREASING monotonicity regardless of
# monotonicity direction of calibrator.
      # It's not some weird configuration hack. It's just how math works :)
monotonicity='decreasing',
# Convexity together with decreasing monotonicity result in diminishing
# return constraint.
convexity='convex',
      # You can specify a list of regularizers. You are not limited to TFL
      # regularizers. Feel free to use any :)
kernel_regularizer=[('laplacian', 0.0, 1e-4),
keras.regularizers.l1_l2(l1=0.001)])(
chol_input)
lattice_inputs.append(chol_calibrator)
# ############### fbs ###############
fbs_input = keras.layers.Input(shape=[1])
model_inputs.append(fbs_input)
fbs_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[6] - 1.0,
# For categorical calibration layer monotonicity is specified for pairs
# of indices of categories. Output for first category in pair will be
# smaller than output for second category.
#
# Don't forget to set monotonicity of corresponding dimension of Lattice
# layer to 'increasing'.
monotonicities=[(0, 1)],
# This initializer is identical to default one ('uniform'), but has fixed
# seed in order to simplify experimentation.
kernel_initializer=keras.initializers.RandomUniform(
minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1),
)(
fbs_input)
lattice_inputs.append(fbs_calibrator)
# ############### restecg ###############
restecg_input = keras.layers.Input(shape=[1])
model_inputs.append(restecg_input)
restecg_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=3,
output_min=0.0,
output_max=lattice_sizes[7] - 1.0,
# Categorical monotonicity can be partial order.
monotonicities=[(0, 1), (0, 2)],
# Categorical calibration layer supports standard Keras regularizers.
kernel_regularizer=keras.regularizers.l1_l2(l1=0.001),
kernel_initializer='constant',
)(
restecg_input)
lattice_inputs.append(restecg_calibrator)
# Lattice inputs must be either list of d tensors of rank (batch_size, 1) or
# single tensor of rank (batch_size, d) where d is dimensionality of lattice.
# Since our embedding layer has size 2 in second dimension - concatenate all
# of inputs to create single tensor.
lattice_inputs_tensor = keras.layers.concatenate(lattice_inputs, axis=1)
  # Create Lattice layer to nonlinearly fuse output of calibrators. Don't forget
# to specify 'increasing' monotonicity for any dimension for which
# monotonicity is configured regardless of monotonicity direction of those.
# This includes partial monotonicity of CategoricalCalibration layer.
# Note that making embedding inputs monotonic does not make sense.
lattice = tfl.layers.Lattice(
lattice_sizes=lattice_sizes,
monotonicities=[
'none', 'none', 'none', 'increasing', 'increasing', 'increasing',
'increasing', 'increasing'
],
output_min=0.0,
output_max=1.0,
)(
lattice_inputs_tensor)
model = keras.models.Model(inputs=model_inputs, outputs=lattice)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=keras.optimizers.Adagrad(learning_rate=1.0))
feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']
features = np.split(
training_data_df[feature_names].values.astype(np.float32),
indices_or_sections=len(feature_names),
axis=1)
target = training_data_df[['target']].values.astype(np.float32)
# Bucketize input for embedding.
embedding_bins = np.quantile(
features[0],
# 10 keypoints will produce 9 dims numbered 1..9 to match embedding input
# size of 10.
np.linspace(0.0, 1.0, num=10, dtype=np.float32))
# Ensure that highest age will get into last bin rather than its own one.
embedding_bins[-1] += 1.0
features[0] = np.digitize(features[0], bins=embedding_bins)
model.fit(
features,
target,
batch_size=32,
epochs=FLAGS.num_epochs,
validation_split=0.2,
shuffle=False)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | alexsavio/scikit-learn | sklearn/metrics/tests/test_common.py | 11 | 43054 |
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO: Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
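# For illustration only (names invented): registering a new classification metric
# for these tests would mostly amount to adding it to the dictionary below and
# tagging its properties in the lists that follow, e.g.
#
#     CLASSIFICATION_METRICS["my_new_score"] = my_new_score
#     SYMMETRIC_METRICS.append("my_new_score")     # only if it really is symmetric
#     METRICS_WITH_LABELS.append("my_new_score")   # only if it takes a labels arg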
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics are invariant to string vs. number labels.
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
    invalids = [([0, 1], [np.inf, np.inf]),
                ([0, 1], [np.nan, np.nan]),
                ([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Can't handle mix of binary and continuous",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
        [(), (2,), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
                       msg="Failed to correctly test the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
                       msg="Failed to correctly test the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
                       msg="Failed to correctly test the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
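# A minimal, self-contained sketch (not collected as a test) of the
# micro/macro/weighted averaging relationships exercised by _check_averaging
# above; the toy labels below are illustrative only.
def _averaging_relationships_example():
    import numpy as np
    from sklearn.metrics import f1_score
    y_true = np.array([0, 0, 1, 1, 2])
    y_pred = np.array([0, 1, 1, 1, 2])
    per_label = f1_score(y_true, y_pred, average=None)
    # macro averaging is the unweighted mean of the per-label scores
    assert np.isclose(f1_score(y_true, y_pred, average="macro"),
                      per_label.mean())
    # weighted averaging weights each label by its support in y_true
    support = np.bincount(y_true)
    assert np.isclose(f1_score(y_true, y_pred, average="weighted"),
                      np.average(per_label, weights=support))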
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights give the same score as no weights
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weights are equivalent to repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if
    # sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
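# A short, self-contained illustration (not collected as a test) of the core
# sample-weight property checked above: integer weights are equivalent to
# repeating the corresponding samples. The arrays below are made up.
def _sample_weight_repeat_example():
    import numpy as np
    from sklearn.metrics import accuracy_score
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])
    w = np.array([1, 2, 3, 1])
    weighted = accuracy_score(y_true, y_pred, sample_weight=w)
    repeated = accuracy_score(np.repeat(y_true, w), np.repeat(y_pred, w))
    assert np.isclose(weighted, repeated)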
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_score)
else:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
bsd-3-clause
|
phayne/heat1d
|
python/setup.py
|
1
|
1307
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
setup(
name="heat1d",
version="0.3.2",
description="Thermal model for planetary science applications",
long_description=readme + "\n\n" + history,
author="Paul O. Hayne",
author_email="paul.hayne@lasp.colorado.edu",
maintainer_email="kmichael.aye@gmail.com",
url="https://github.com/phayne/heat1d",
packages=find_packages(),
entry_points={"console_scripts": ["heat1d=heat1d.cli:main"]},
package_dir={"heat1d": "heat1d"},
include_package_data=True,
    install_requires=["Click>=6.0", "numpy", "matplotlib", "planets>=0.4.6"],
license="MIT license",
zip_safe=False,
keywords="heat1d",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
test_suite="tests",
tests_require=["pytest"],
setup_requires=["pytest_runner"],
)
|
mit
|
rhyolight/nupic.research
|
htmresearch/support/sequence_learning_utils.py
|
10
|
4876
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
def NRMSE(data, pred):
return np.sqrt(np.nanmean(np.square(pred-data)))/\
np.nanstd(data)
def NRMSE_sliding(data, pred, windowSize):
"""
Computing NRMSE in a sliding window
:param data:
:param pred:
:param windowSize:
:return: (window_center, NRMSE)
"""
halfWindowSize = int(round(float(windowSize)/2))
window_center = range(halfWindowSize, len(data)-halfWindowSize, int(round(float(halfWindowSize)/5.0)))
nrmse = []
for wc in window_center:
nrmse.append(NRMSE(data[wc-halfWindowSize:wc+halfWindowSize],
pred[wc-halfWindowSize:wc+halfWindowSize]))
return (window_center, nrmse)
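# A brief, self-contained usage sketch for NRMSE_sliding (not called at
# import time); the sine-wave data below is made up purely to show the
# expected call pattern and return shape.
def _nrmse_sliding_example():
  t = np.arange(200)
  data = np.sin(0.1 * t)
  pred = np.sin(0.1 * (t - 1))  # predictions lagging the data by one step
  window_center, nrmse = NRMSE_sliding(data, pred, windowSize=40)
  return window_center, nrmse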
def altMAPE(groundTruth, prediction):
error = abs(groundTruth - prediction)
altMAPE = 100.0 * np.sum(error) / np.sum(abs(groundTruth))
return altMAPE
def MAPE(groundTruth, prediction):
MAPE = np.nanmean(
np.abs(groundTruth - prediction)) / np.nanmean(np.abs(groundTruth))
return MAPE
def computeAltMAPE(truth, prediction, startFrom=0):
return np.nanmean(np.abs(truth[startFrom:] - prediction[startFrom:]))/np.nanmean(np.abs(truth[startFrom:]))
def computeNRMSE(truth, prediction, startFrom=0):
squareDeviation = computeSquareDeviation(prediction, truth)
squareDeviation[:startFrom] = None
return np.sqrt(np.nanmean(squareDeviation))/np.nanstd(truth)
def computeSquareDeviation(predictions, truth):
squareDeviation = np.square(predictions-truth)
return squareDeviation
def computeLikelihood(predictions, truth, encoder):
targetDistribution = np.zeros(predictions.shape)
for i in xrange(len(truth)):
    if not np.isnan(truth[i]):
targetDistribution[i, :] = encoder.encode(truth[i])
# calculate negative log-likelihood
Likelihood = np.multiply(predictions, targetDistribution)
Likelihood = np.sum(Likelihood, axis=1)
minProb = 0.01
Likelihood[np.where(Likelihood < minProb)[0]] = minProb
negLL = -np.log(Likelihood)
return negLL
def computeAbsoluteError(predictions, truth):
  return np.abs(predictions - truth)
def movingAverage(a, n):
movingAverage = []
for i in xrange(len(a)):
start = max(0, i - n)
end = i+1
#
# start = i
# end = min(len(a), i + n)
# start = max(0, i-n/2)
# end = min(len(a), i+n/2)
#
values = a[start:end]
movingAverage.append(sum(values) / float(len(values)))
return movingAverage
def plotAccuracy(results, truth, train=None, window=100, label=None, params=None, errorType=None,
                 skipRecordNum=5904):
plt.title('Prediction Error Over Time')
error = results[0]
x = results[1]
x = x[:len(error)]
# print results
# print params['compute_after']
# if params is not None:
# error[np.where(x < params['compute_after'])[0]] = np.nan
error[:skipRecordNum] = np.nan
movingData = movingAverage(error, min(len(error), window))
if errorType == 'square_deviation':
print label, " Avg NRMSE:", np.sqrt(np.nanmean(error))/np.nanstd(truth)
avgError = np.sqrt(np.array(movingData))/np.nanstd(truth)
elif errorType == 'negLL':
print label, " Avg negLL:", np.nanmean(error)
avgError = movingData
elif errorType == 'mape':
normFactor = np.nanmean(np.abs(truth))
print label, " MAPE:", np.nanmean(error) / normFactor
avgError = movingData / normFactor
else:
raise NotImplementedError
plt.plot(x, avgError, label=label)
plt.xlabel("# of elements seen")
plt.ylabel("{0} over last {1} record".format(errorType, window))
if train is not None:
for i in xrange(len(train)):
if train[i]:
plt.axvline(x[i], color='orange')
if params is not None:
if params['perturb_after'] < len(x):
plt.axvline(x[params['perturb_after']], color='black', linestyle='--')
plt.xlim(x[0], x[len(x)-1])
return movingData
# plt.ylim(0, 1.001)
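# A minimal usage sketch of the error metrics defined above, guarded so it
# only runs when this module is executed directly; the numbers are made up
# purely for illustration.
if __name__ == "__main__":
  truth = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
  prediction = np.array([1.1, 1.9, 3.2, 3.8, 5.3])
  # NRMSE: root-mean-square error normalized by the standard deviation
  print("NRMSE: %.4f" % NRMSE(truth, prediction))
  # altMAPE: total absolute error as a percentage of total absolute truth
  print("altMAPE: %.2f%%" % altMAPE(truth, prediction))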
|
gpl-3.0
|
jseabold/statsmodels
|
statsmodels/discrete/tests/test_sandwich_cov.py
|
4
|
22979
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 09 21:29:20 2013
Author: Josef Perktold
"""
import os
import numpy as np
import pandas as pd
import statsmodels.discrete.discrete_model as smd
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.genmod.families import links
from statsmodels.regression.linear_model import OLS
from statsmodels.base.covtype import get_robustcov_results
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.tools import add_constant
from numpy.testing import assert_allclose, assert_equal, assert_
import statsmodels.tools._testing as smt
# get data and results as module global for now, TODO: move to class
from .results import results_count_robust_cluster as results_st
cur_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(cur_dir, "results", "ships.csv")
data_raw = pd.read_csv(filepath, index_col=False)
data = data_raw.dropna()
#mod = smd.Poisson.from_formula('accident ~ yr_con + op_75_79', data=dat)
# Do not use formula for tests against Stata because intercept needs to be last
endog = data['accident']
exog_data = data['yr_con op_75_79'.split()]
exog = add_constant(exog_data, prepend=False)
group = np.asarray(data['ship'], int)
exposure = np.asarray(data['service'])
# TODO get the test methods from regression/tests
class CheckCountRobustMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
if len(res1.params) == (len(res2.params) - 1):
# Stata includes lnalpha in table for NegativeBinomial
mask = np.ones(len(res2.params), np.bool_)
mask[-2] = False
res2_params = res2.params[mask]
res2_bse = res2.bse[mask]
else:
res2_params = res2.params
res2_bse = res2.bse
assert_allclose(res1._results.params, res2_params, 1e-4)
assert_allclose(self.bse_rob / self.corr_fact, res2_bse, 6e-5)
@classmethod
def get_robust_clu(cls):
res1 = cls.res1
cov_clu = sw.cov_cluster(res1, group)
cls.bse_rob = sw.se_cov(cov_clu)
cls.corr_fact = cls.get_correction_factor(res1)
@classmethod
def get_correction_factor(cls, results, sub_kparams=True):
mod = results.model
nobs, k_vars = mod.exog.shape
if sub_kparams:
# TODO: document why we adjust by k_params for some classes
# but not others.
k_params = len(results.params)
else:
k_params = 0
corr_fact = (nobs - 1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
return np.sqrt(corr_fact)
def test_oth(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1._results.llf, res2.ll, 1e-4)
assert_allclose(res1._results.llnull, res2.ll_0, 1e-4)
def test_ttest(self):
smt.check_ttest_tvalues(self.res1)
def test_waldtest(self):
smt.check_ftest_pvalues(self.res1)
class TestPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
debug = False
if debug:
# for debugging
cls.bse_nonrobust = cls.res1.bse.copy()
cls.res1 = res1 = mod.fit(disp=False)
cls.get_robust_clu()
cls.res3 = cls.res1
cls.bse_rob3 = cls.bse_rob.copy()
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
# TODO: refactor xxxFit to full testing results
class TestPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
# scaling of cov_params_default to match Stata
# TODO should the default be changed?
nobs, k_params = mod.exog.shape
# TODO: this is similar but not identical to logic in
# get_correction_factor; can we de-duplicate?
sc_fact = (nobs-1.) / float(nobs - k_params)
cls.res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
scaling_factor=1. / sc_fact,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
# backwards compatibility with inherited test methods
cls.corr_fact = 1
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.tvalues, res2.tvalues, rtol=rtol, atol=1e-8)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestPoissonHC1FitExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_hc1
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestPoissonCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluExposureGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse #sw.se_cov(cov_clu)
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
cls.get_robust_clu()
class TestGLMPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit()
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
# TODO: refactor xxxFit to full testing results
class TestGLMPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit(cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit(cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestNegbinClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = mod.fit(disp=False, gtol=1e-7)
cls.get_robust_clu()
class TestNegbinCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
# mod_nbe = smd.NegativeBinomial(endog, exog, exposure=data['service'])
# res_nbe = mod_nbe.fit()
# mod_nb = smd.NegativeBinomial(endog, exog)
# res_nb = mod_nb.fit()
#
# cov_clu_nb = sw.cov_cluster(res_nb, group)
# k_params = k_vars + 1
# print sw.se_cov(cov_clu_nb / ((nobs-1.) / float(nobs - k_params)))
#
# wt = res_nb.wald_test(np.eye(len(res_nb.params))[1:3], cov_p=cov_clu_nb/((nobs-1.) / float(nobs - k_params)))
# print wt
#
# print dir(results_st)
class TestNegbinCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, gtol=1e-7)
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestNegbinCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
gtol=1e-7)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestNegbinCluExposureFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class CheckDiscreteGLM(object):
# compare GLM with other models, no verified reference results
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_equal(res1.cov_type, self.cov_type)
assert_equal(res2.cov_type, self.cov_type)
rtol = getattr(res1, 'rtol', 1e-13)
assert_allclose(res1.params, res2.params, rtol=rtol)
assert_allclose(res1.bse, res2.bse, rtol=1e-10)
class TestGLMLogit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Logit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMProbit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial(link=links.probit()))
cls.res1 = mod1.fit(method='newton',
cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Probit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
cls.rtol = 1e-6
def test_score_hessian(self):
res1 = self.res1
res2 = self.res2
# Note scale is fixed at 1, so we do not need to fix it explicitly
score1 = res1.model.score(res1.params * 0.98)
score2 = res2.model.score(res1.params * 0.98)
assert_allclose(score1, score2, rtol=1e-13)
hess1 = res1.model.hessian(res1.params)
hess2 = res2.model.hessian(res1.params)
assert_allclose(hess1, hess2, rtol=1e-13)
class TestGLMGaussNonRobust(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'nonrobust'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit()
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit()
class TestGLMGaussClu(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'cluster'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussHC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HC0'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HC0')
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HC0')
class TestGLMGaussHAC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'maxlags':2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds)
class TestGLMGaussHAC2(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
# check kernel specified as string
kwds = {'kernel': 'bartlett', 'maxlags': 2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
kwds2 = {'maxlags': 2}
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds2)
class TestGLMGaussHACUniform(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'kernel':sw.weights_uniform, 'maxlags':2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds)
#for debugging
cls.res3 = mod2.fit(cov_type='HAC', cov_kwds={'maxlags':2})
def test_cov_options(self):
        # check keyword `weights_func`
kwdsa = {'weights_func': sw.weights_uniform, 'maxlags': 2}
res1a = self.res1.model.fit(cov_type='HAC', cov_kwds=kwdsa)
res2a = self.res2.model.fit(cov_type='HAC', cov_kwds=kwdsa)
assert_allclose(res1a.bse, self.res1.bse, rtol=1e-12)
assert_allclose(res2a.bse, self.res2.bse, rtol=1e-12)
# regression test for bse values
bse = np.array([ 2.82203924, 4.60199596, 11.01275064])
assert_allclose(res1a.bse, bse, rtol=1e-6)
assert_(res1a.cov_kwds['weights_func'] is sw.weights_uniform)
kwdsb = {'kernel': sw.weights_bartlett, 'maxlags': 2}
res1a = self.res1.model.fit(cov_type='HAC', cov_kwds=kwdsb)
res2a = self.res2.model.fit(cov_type='HAC', cov_kwds=kwdsb)
assert_allclose(res1a.bse, res2a.bse, rtol=1e-12)
# regression test for bse values
bse = np.array([ 2.502264, 3.697807, 9.193303])
assert_allclose(res1a.bse, bse, rtol=1e-6)
class TestGLMGaussHACUniform2(TestGLMGaussHACUniform):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'kernel': sw.weights_uniform, 'maxlags': 2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
# check kernel as string
mod2 = OLS(endog, exog)
kwds2 = {'kernel': 'uniform', 'maxlags': 2}
        cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds2)
class TestGLMGaussHACPanel(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-panel'
# time index is just made up to have a test case
time = np.tile(np.arange(7), 5)[:-1]
mod1 = GLM(endog.copy(), exog.copy(), family=families.Gaussian())
kwds = dict(time=time,
maxlags=2,
kernel=sw.weights_uniform,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-panel', cov_kwds=kwds)
cls.res1b = mod1.fit(cov_type='nw-panel', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-panel', cov_kwds=kwds)
def test_kwd(self):
# test corrected keyword name
assert_allclose(self.res1b.bse, self.res1.bse, rtol=1e-12)
class TestGLMGaussHACPanelGroups(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-panel'
        # group index is just made up to have a test case
groups = np.repeat(np.arange(5), 7)[:-1]
mod1 = GLM(endog.copy(), exog.copy(), family=families.Gaussian())
kwds = dict(groups=pd.Series(groups), # check for #3606
maxlags=2,
kernel=sw.weights_uniform,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-panel', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-panel', cov_kwds=kwds)
class TestGLMGaussHACGroupsum(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-groupsum'
# time index is just made up to have a test case
time = np.tile(np.arange(7), 5)[:-1]
mod1 = GLM(endog, exog, family=families.Gaussian())
kwds = dict(time=pd.Series(time), # check for #3606
maxlags=2,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-groupsum', cov_kwds=kwds)
cls.res1b = mod1.fit(cov_type='nw-groupsum', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-groupsum', cov_kwds=kwds)
def test_kwd(self):
# test corrected keyword name
assert_allclose(self.res1b.bse, self.res1.bse, rtol=1e-12)
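# A small, self-contained sketch (not a test) of the finite-sample correction
# factor used above to match Stata's cluster-robust standard errors; the
# sample sizes here are arbitrary examples, not taken from the ships data.
if __name__ == '__main__':
    nobs, k_params = 40, 3
    corr_fact = (nobs - 1.) / float(nobs - k_params)
    # standard errors scale with the square root of the variance correction
    print('variance correction factor: %.6f' % corr_fact)
    print('bse correction factor: %.6f' % np.sqrt(corr_fact))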
|
bsd-3-clause
|
ChristianSch/skml
|
skml/problem_transformation/classifier_chain.py
|
1
|
3449
|
import numpy as np
from sklearn.base import clone
from sklearn.base import BaseEstimator, MetaEstimatorMixin, ClassifierMixin
from sklearn.utils import validation
class ClassifierChain(BaseEstimator, MetaEstimatorMixin, ClassifierMixin):
"""
This classifier constructs a chain of classifiers for multi-label
classification (MLC).
    For each label (labels ought to be given as a binary indicator matrix,
    where 0 means "instance does not have this label" and 1 means it does)
    a separate classifier is trained, and each classifier predicts exactly
    one label (binary relevance). The first classifier predicts the first
    label from the original feature vector; the second predicts the second
    label, with the first label appended to the feature vector (X[i]); in
    general, the n-th classifier predicts the n-th label from the feature
    vector extended with the preceding n-1 labels.
"""
    def __init__(self, estimator):
        r"""Classifier Chain multi-label strategy
Builds a new classifier chain using the given classifier, which is
copied :math:`|\mathcal{L}|` times (L is the set of labels).
Parameters
----------
estimator : scikit-learn compatible classifier instance
The classifier used to build a chain of classifiers.
Will be copied, hence the original will be left untouched.
"""
self.estimator = estimator
self.estimators_ = []
self.L = 0
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-label targets encoded as binary vectors.
"""
validation.check_X_y(X, y, multi_output=True)
y = validation.check_array(y, accept_sparse=True)
self.L = y.shape[1]
for i in range(self.L):
c = clone(self.estimator)
            # at this point, every classifier earlier in the chain has
            # already been fitted to the training data.
            if i == 0:
                # the first classifier in the chain is trained on the raw
                # feature vectors only.
                c.fit(X, y[:, 0])
            else:
                # later classifiers are trained on the features augmented
                # with the true values of the preceding labels; at prediction
                # time the previously predicted labels are used instead.
                stacked = np.hstack((X, y[:, :i]))
                c.fit(stacked, y[:, i])
            self.estimators_.append(c)
        return self
def predict(self, X):
"""
Predicts the labels for the given instances.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
array-like, shape = [n_samples, n_labels]
Estimated labels
"""
validation.check_is_fitted(self, 'estimators_')
for i, c in enumerate(self.estimators_):
if i == 0:
y_pred = (c.predict(X)).reshape(-1, 1)
else:
stacked = np.hstack((X, y_pred))
new_y = c.predict(stacked)
y_pred = np.hstack((y_pred, new_y.reshape(-1, 1)))
return y_pred
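# A minimal usage sketch, assuming a scikit-learn base classifier such as
# LogisticRegression and scikit-learn's multi-label data generator; it is
# illustrative only and not part of the library code above.
if __name__ == "__main__":
    from sklearn.datasets import make_multilabel_classification
    from sklearn.linear_model import LogisticRegression
    X, Y = make_multilabel_classification(n_samples=100, n_features=10,
                                          n_classes=4, random_state=0)
    chain = ClassifierChain(LogisticRegression())
    chain.fit(X, Y)
    Y_pred = chain.predict(X)
    # one fitted estimator per label, predictions as a binary indicator matrix
    print(len(chain.estimators_), Y_pred.shape)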
|
mit
|
brsu/Learning-And-Adaptivity
|
assignment_template/code/letters_ml.py
|
1
|
1141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 13 00:32:05 2014
@author: alex
"""
import numpy as np
import sklearn
from sklearn import preprocessing
from sklearn import tree
from sklearn.externals.six import StringIO
import pydot
import time
class ScalabilityQuestion(object):
def __init__(self):
self.prepare_data()
def prepare_data(self):
# Read the data from file
data = np.genfromtxt('data/letter-recognition/letter-recognition.data',
delimiter=',')
self.data = np.delete(data, 0, 1)
        # The first column of data is of type string, and wasn't read properly
data_raw = np.genfromtxt('data/letter-recognition/letter-recognition.data',
delimiter=',', dtype=None)
letters = list()
for i in xrange(data_raw.size):
letters.append(data_raw[i][0])
letters = np.array(letters)
# Now encode the strings
self.le = sklearn.preprocessing.LabelEncoder()
self.le.fit(letters)
self.encoded_letters = self.le.transform(letters)[:,np.newaxis]
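# A minimal, standalone illustration of the label-encoding step used in
# prepare_data above; it uses a tiny hand-made letter array instead of the
# letter-recognition data file.
if __name__ == "__main__":
    from sklearn import preprocessing
    demo_letters = np.array(['T', 'I', 'D', 'N', 'T'])
    le = preprocessing.LabelEncoder()
    encoded = le.fit_transform(demo_letters)
    # each distinct letter is mapped to an integer class index
    print(encoded)
    print(le.inverse_transform(encoded))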
|
mit
|
NICTA/dora
|
tests/test_sampler.py
|
1
|
2410
|
"""
PyTest module for Sampler class testing.
"""
import numpy as np
import os
def test_gp(update_ref_data):
"""
Test the GaussianProcess sampler.
This is a wrapper on the general test for the GaussianProcess method
See Also
--------
verify_common_samplers : The general sampler testing framework
"""
verify_common_samplers(update_ref_data=update_ref_data,
sampler_model='GaussianProcess')
def test_delaunay(update_ref_data):
"""
Test the Delaunay sampler.
This is a wrapper on the general test for the Delaunay method
See Also
--------
verify_common_samplers : The general sampler testing framework
"""
verify_common_samplers(update_ref_data=update_ref_data,
sampler_model='Delaunay')
def verify_common_samplers(update_ref_data=False,
sampler_model='GaussianProcess'):
"""
Test a general sampler's output.
For any implemented sampling method, this function tests if the final
collection of active sampled outputs are the same as before. If the
reference data is to be updated, it can also do so by setting the
corresponding fixture through 'py.test --update-ref-data=True'.
.. note ::
This will require the corresponding demonstration to return the
sampler instance to be tested. The demo cannot have a
'matplotlib.pyplot.show()' call or anything that pauses the script
(obviously). If the sampling method has a random element, a seed must
be set in the demo itself.
Parameters
----------
update_ref_data : bool, optional
To update the reference data or not
sampler_model : str, optional
The sampling method to test and verify
"""
from demos.demo_python_api import main
sampler = main(sampler_model=sampler_model)
cwd = os.path.dirname(__file__)
filename = '%s/data/ref_data_%s.npz' % (cwd, sampler_model)
if update_ref_data:
np.savez(filename,
X=sampler.X(),
y=sampler.y(),
v=sampler.virtual_flag())
else:
ref_data = np.load(filename)
# np.testing.assert_allclose(sampler.X(), ref_data['X'])
np.testing.assert_allclose(sampler.y(), ref_data['y'])
np.testing.assert_allclose(sampler.virtual_flag(), ref_data['v'])
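# A minimal, self-contained sketch of the reference-data round trip used by
# verify_common_samplers above (np.savez to write, np.load plus
# assert_allclose to compare); the arrays and file name here are made up.
if __name__ == "__main__":
    import tempfile
    ref_file = os.path.join(tempfile.mkdtemp(), "ref_data_demo.npz")
    y = np.linspace(0.0, 1.0, 5)
    np.savez(ref_file, y=y)
    ref_data = np.load(ref_file)
    np.testing.assert_allclose(y, ref_data["y"])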
|
apache-2.0
|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/gaussian_process/plot_gpr_prior_posterior.py
|
104
|
2878
|
"""
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
tylerjereddy/scipy
|
scipy/io/wavfile.py
|
10
|
26615
|
"""
Module to read / write wav files using NumPy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a NumPy array as a WAV file.
"""
import io
import sys
import numpy
import struct
import warnings
from enum import IntEnum
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
class WAVE_FORMAT(IntEnum):
"""
WAVE form wFormatTag IDs
Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the
newest additions, in v10.0.14393 2016-07
"""
UNKNOWN = 0x0000
PCM = 0x0001
ADPCM = 0x0002
IEEE_FLOAT = 0x0003
VSELP = 0x0004
IBM_CVSD = 0x0005
ALAW = 0x0006
MULAW = 0x0007
DTS = 0x0008
DRM = 0x0009
WMAVOICE9 = 0x000A
WMAVOICE10 = 0x000B
OKI_ADPCM = 0x0010
DVI_ADPCM = 0x0011
IMA_ADPCM = 0x0011 # Duplicate
MEDIASPACE_ADPCM = 0x0012
SIERRA_ADPCM = 0x0013
G723_ADPCM = 0x0014
DIGISTD = 0x0015
DIGIFIX = 0x0016
DIALOGIC_OKI_ADPCM = 0x0017
MEDIAVISION_ADPCM = 0x0018
CU_CODEC = 0x0019
HP_DYN_VOICE = 0x001A
YAMAHA_ADPCM = 0x0020
SONARC = 0x0021
DSPGROUP_TRUESPEECH = 0x0022
ECHOSC1 = 0x0023
AUDIOFILE_AF36 = 0x0024
APTX = 0x0025
AUDIOFILE_AF10 = 0x0026
PROSODY_1612 = 0x0027
LRC = 0x0028
DOLBY_AC2 = 0x0030
GSM610 = 0x0031
MSNAUDIO = 0x0032
ANTEX_ADPCME = 0x0033
CONTROL_RES_VQLPC = 0x0034
DIGIREAL = 0x0035
DIGIADPCM = 0x0036
CONTROL_RES_CR10 = 0x0037
NMS_VBXADPCM = 0x0038
CS_IMAADPCM = 0x0039
ECHOSC3 = 0x003A
ROCKWELL_ADPCM = 0x003B
ROCKWELL_DIGITALK = 0x003C
XEBEC = 0x003D
G721_ADPCM = 0x0040
G728_CELP = 0x0041
MSG723 = 0x0042
INTEL_G723_1 = 0x0043
INTEL_G729 = 0x0044
SHARP_G726 = 0x0045
MPEG = 0x0050
RT24 = 0x0052
PAC = 0x0053
MPEGLAYER3 = 0x0055
LUCENT_G723 = 0x0059
CIRRUS = 0x0060
ESPCM = 0x0061
VOXWARE = 0x0062
CANOPUS_ATRAC = 0x0063
G726_ADPCM = 0x0064
G722_ADPCM = 0x0065
DSAT = 0x0066
DSAT_DISPLAY = 0x0067
VOXWARE_BYTE_ALIGNED = 0x0069
VOXWARE_AC8 = 0x0070
VOXWARE_AC10 = 0x0071
VOXWARE_AC16 = 0x0072
VOXWARE_AC20 = 0x0073
VOXWARE_RT24 = 0x0074
VOXWARE_RT29 = 0x0075
VOXWARE_RT29HW = 0x0076
VOXWARE_VR12 = 0x0077
VOXWARE_VR18 = 0x0078
VOXWARE_TQ40 = 0x0079
VOXWARE_SC3 = 0x007A
VOXWARE_SC3_1 = 0x007B
SOFTSOUND = 0x0080
VOXWARE_TQ60 = 0x0081
MSRT24 = 0x0082
G729A = 0x0083
MVI_MVI2 = 0x0084
DF_G726 = 0x0085
DF_GSM610 = 0x0086
ISIAUDIO = 0x0088
ONLIVE = 0x0089
MULTITUDE_FT_SX20 = 0x008A
INFOCOM_ITS_G721_ADPCM = 0x008B
CONVEDIA_G729 = 0x008C
CONGRUENCY = 0x008D
SBC24 = 0x0091
DOLBY_AC3_SPDIF = 0x0092
MEDIASONIC_G723 = 0x0093
PROSODY_8KBPS = 0x0094
ZYXEL_ADPCM = 0x0097
PHILIPS_LPCBB = 0x0098
PACKED = 0x0099
MALDEN_PHONYTALK = 0x00A0
RACAL_RECORDER_GSM = 0x00A1
RACAL_RECORDER_G720_A = 0x00A2
RACAL_RECORDER_G723_1 = 0x00A3
RACAL_RECORDER_TETRA_ACELP = 0x00A4
NEC_AAC = 0x00B0
RAW_AAC1 = 0x00FF
RHETOREX_ADPCM = 0x0100
IRAT = 0x0101
VIVO_G723 = 0x0111
VIVO_SIREN = 0x0112
PHILIPS_CELP = 0x0120
PHILIPS_GRUNDIG = 0x0121
DIGITAL_G723 = 0x0123
SANYO_LD_ADPCM = 0x0125
SIPROLAB_ACEPLNET = 0x0130
SIPROLAB_ACELP4800 = 0x0131
SIPROLAB_ACELP8V3 = 0x0132
SIPROLAB_G729 = 0x0133
SIPROLAB_G729A = 0x0134
SIPROLAB_KELVIN = 0x0135
VOICEAGE_AMR = 0x0136
G726ADPCM = 0x0140
DICTAPHONE_CELP68 = 0x0141
DICTAPHONE_CELP54 = 0x0142
QUALCOMM_PUREVOICE = 0x0150
QUALCOMM_HALFRATE = 0x0151
TUBGSM = 0x0155
MSAUDIO1 = 0x0160
WMAUDIO2 = 0x0161
WMAUDIO3 = 0x0162
WMAUDIO_LOSSLESS = 0x0163
WMASPDIF = 0x0164
UNISYS_NAP_ADPCM = 0x0170
UNISYS_NAP_ULAW = 0x0171
UNISYS_NAP_ALAW = 0x0172
UNISYS_NAP_16K = 0x0173
SYCOM_ACM_SYC008 = 0x0174
SYCOM_ACM_SYC701_G726L = 0x0175
SYCOM_ACM_SYC701_CELP54 = 0x0176
SYCOM_ACM_SYC701_CELP68 = 0x0177
KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
DTS_DS = 0x0190
CREATIVE_ADPCM = 0x0200
CREATIVE_FASTSPEECH8 = 0x0202
CREATIVE_FASTSPEECH10 = 0x0203
UHER_ADPCM = 0x0210
ULEAD_DV_AUDIO = 0x0215
ULEAD_DV_AUDIO_1 = 0x0216
QUARTERDECK = 0x0220
ILINK_VC = 0x0230
RAW_SPORT = 0x0240
ESST_AC3 = 0x0241
GENERIC_PASSTHRU = 0x0249
IPI_HSX = 0x0250
IPI_RPELP = 0x0251
CS2 = 0x0260
SONY_SCX = 0x0270
SONY_SCY = 0x0271
SONY_ATRAC3 = 0x0272
SONY_SPC = 0x0273
TELUM_AUDIO = 0x0280
TELUM_IA_AUDIO = 0x0281
NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
FM_TOWNS_SND = 0x0300
MICRONAS = 0x0350
MICRONAS_CELP833 = 0x0351
BTV_DIGITAL = 0x0400
INTEL_MUSIC_CODER = 0x0401
INDEO_AUDIO = 0x0402
QDESIGN_MUSIC = 0x0450
ON2_VP7_AUDIO = 0x0500
ON2_VP6_AUDIO = 0x0501
VME_VMPCM = 0x0680
TPC = 0x0681
LIGHTWAVE_LOSSLESS = 0x08AE
OLIGSM = 0x1000
OLIADPCM = 0x1001
OLICELP = 0x1002
OLISBC = 0x1003
OLIOPR = 0x1004
LH_CODEC = 0x1100
LH_CODEC_CELP = 0x1101
LH_CODEC_SBC8 = 0x1102
LH_CODEC_SBC12 = 0x1103
LH_CODEC_SBC16 = 0x1104
NORRIS = 0x1400
ISIAUDIO_2 = 0x1401
SOUNDSPACE_MUSICOMPRESS = 0x1500
MPEG_ADTS_AAC = 0x1600
MPEG_RAW_AAC = 0x1601
MPEG_LOAS = 0x1602
NOKIA_MPEG_ADTS_AAC = 0x1608
NOKIA_MPEG_RAW_AAC = 0x1609
VODAFONE_MPEG_ADTS_AAC = 0x160A
VODAFONE_MPEG_RAW_AAC = 0x160B
MPEG_HEAAC = 0x1610
VOXWARE_RT24_SPEECH = 0x181C
SONICFOUNDRY_LOSSLESS = 0x1971
INNINGS_TELECOM_ADPCM = 0x1979
LUCENT_SX8300P = 0x1C07
LUCENT_SX5363S = 0x1C0C
CUSEEME = 0x1F03
NTCSOFT_ALF2CM_ACM = 0x1FC4
DVM = 0x2000
DTS2 = 0x2001
MAKEAVIS = 0x3313
DIVIO_MPEG4_AAC = 0x4143
NOKIA_ADAPTIVE_MULTIRATE = 0x4201
DIVIO_G726 = 0x4243
LEAD_SPEECH = 0x434C
LEAD_VORBIS = 0x564C
WAVPACK_AUDIO = 0x5756
OGG_VORBIS_MODE_1 = 0x674F
OGG_VORBIS_MODE_2 = 0x6750
OGG_VORBIS_MODE_3 = 0x6751
OGG_VORBIS_MODE_1_PLUS = 0x676F
OGG_VORBIS_MODE_2_PLUS = 0x6770
OGG_VORBIS_MODE_3_PLUS = 0x6771
ALAC = 0x6C61
_3COM_NBX = 0x7000 # Can't have leading digit
OPUS = 0x704F
FAAD_AAC = 0x706D
AMR_NB = 0x7361
AMR_WB = 0x7362
AMR_WP = 0x7363
GSM_AMR_CBR = 0x7A21
GSM_AMR_VBR_SID = 0x7A22
COMVERSE_INFOSYS_G723_1 = 0xA100
COMVERSE_INFOSYS_AVQSBC = 0xA101
COMVERSE_INFOSYS_SBC = 0xA102
SYMBOL_G729_A = 0xA103
VOICEAGE_AMR_WB = 0xA104
INGENIENT_G726 = 0xA105
MPEG4_AAC = 0xA106
ENCORE_G726 = 0xA107
ZOLL_ASAO = 0xA108
SPEEX_VOICE = 0xA109
VIANIX_MASC = 0xA10A
WM9_SPECTRUM_ANALYZER = 0xA10B
WMF_SPECTRUM_ANAYZER = 0xA10C
GSM_610 = 0xA10D
GSM_620 = 0xA10E
GSM_660 = 0xA10F
GSM_690 = 0xA110
GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
POLYCOM_G722 = 0xA112
POLYCOM_G728 = 0xA113
POLYCOM_G729_A = 0xA114
POLYCOM_SIREN = 0xA115
GLOBAL_IP_ILBC = 0xA116
RADIOTIME_TIME_SHIFT_RADIO = 0xA117
NICE_ACA = 0xA118
NICE_ADPCM = 0xA119
VOCORD_G721 = 0xA11A
VOCORD_G726 = 0xA11B
VOCORD_G722_1 = 0xA11C
VOCORD_G728 = 0xA11D
VOCORD_G729 = 0xA11E
VOCORD_G729_A = 0xA11F
VOCORD_G723_1 = 0xA120
VOCORD_LBC = 0xA121
NICE_G728 = 0xA122
FRACE_TELECOM_G729 = 0xA123
CODIAN = 0xA124
FLAC = 0xF1AC
EXTENSIBLE = 0xFFFE
DEVELOPMENT = 0xFFFF
KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
def _raise_bad_format(format_tag):
try:
format_name = WAVE_FORMAT(format_tag).name
except ValueError:
format_name = f'{format_tag:#06x}'
raise ValueError(f"Unknown wave file format: {format_name}. Supported "
"formats: " +
', '.join(x.name for x in KNOWN_WAVE_FORMATS))
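# A tiny, self-contained illustration (not part of the module API) of the
# format-tag naming used by _raise_bad_format above: known tags resolve to
# an enum name, unknown tags fall back to a hexadecimal string.
def _format_tag_name_example():
    known = WAVE_FORMAT(0x0001).name            # 'PCM'
    try:
        unknown = WAVE_FORMAT(0x9999).name
    except ValueError:
        unknown = f'{0x9999:#06x}'              # '0x9999'
    return known, unknown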
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
Notes
-----
Assumes file pointer is immediately after the 'fmt ' id
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = struct.unpack(fmt+'I', fid.read(4))[0]
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read = 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
_raise_bad_format(format_tag)
# move file pointer to next chunk
if size > bytes_read:
fid.read(size - bytes_read)
# fmt should always be 16, 18 or 40, but handle it just in case
_handle_pad_byte(fid, size)
if format_tag == WAVE_FORMAT.PCM:
if bytes_per_second != fs * block_align:
raise ValueError("WAV header is invalid: nAvgBytesPerSec must"
" equal product of nSamplesPerSec and"
" nBlockAlign, but file has nSamplesPerSec ="
f" {fs}, nBlockAlign = {block_align}, and"
f" nAvgBytesPerSec = {bytes_per_second}")
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
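# Illustrative note (added for clarity; this constant is not part of the
# original module and is not used by it): the WAVE_FORMAT_EXTENSIBLE SubFormat
# GUID for plain PCM, {00000001-0000-0010-8000-00AA00389B71}, serialises in a
# little-endian ('RIFF') file to the 16 bytes below.  Only the first four bytes
# carry the effective format tag, which is why the code above unpacks
# raw_guid[:4] once the remaining twelve bytes match the fixed tail.
_EXAMPLE_PCM_SUBFORMAT_GUID = (b'\x01\x00\x00\x00'
                               b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71')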
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
block_align, mmap=False):
"""
Notes
-----
Assumes file pointer is immediately after the 'data' id
It's possible to not use all available bits in a container, or to store
samples in a container bigger than necessary, so bytes_per_sample uses
the actual reported container size (nBlockAlign / nChannels). Real-world
examples:
Adobe Audition's "24-bit packed int (type 1, 20-bit)"
nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20
http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
is:
nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12
http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
gives an example of:
nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
# Size of the data subchunk in bytes
size = struct.unpack(fmt+'I', fid.read(4))[0]
# Number of bytes per sample (sample container size)
bytes_per_sample = block_align // channels
n_samples = size // bytes_per_sample
if format_tag == WAVE_FORMAT.PCM:
if 1 <= bit_depth <= 8:
dtype = 'u1' # WAV of 8-bit integer or less are unsigned
elif bytes_per_sample in {3, 5, 6, 7}:
# No compatible dtype. Load as raw bytes for reshaping later.
dtype = 'V1'
elif bit_depth <= 64:
# Remaining bit depths can map directly to signed numpy dtypes
dtype = f'{fmt}i{bytes_per_sample}'
else:
raise ValueError("Unsupported bit depth: the WAV file "
f"has {bit_depth}-bit integer data.")
elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
if bit_depth in {32, 64}:
dtype = f'{fmt}f{bytes_per_sample}'
else:
raise ValueError("Unsupported bit depth: the WAV file "
f"has {bit_depth}-bit floating-point data.")
else:
_raise_bad_format(format_tag)
start = fid.tell()
if not mmap:
try:
count = size if dtype == 'V1' else n_samples
data = numpy.fromfile(fid, dtype=dtype, count=count)
except io.UnsupportedOperation: # not a C-like file
fid.seek(start, 0) # just in case it seeked, though it shouldn't
data = numpy.frombuffer(fid.read(size), dtype=dtype)
if dtype == 'V1':
# Rearrange raw bytes into smallest compatible numpy dtype
dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
a = numpy.zeros((len(data) // bytes_per_sample, numpy.dtype(dt).itemsize),
dtype='V1')
if is_big_endian:
a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample))
else:
a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
data = a.view(dt).reshape(a.shape[:-1])
else:
if bytes_per_sample in {1, 2, 4, 8}:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(n_samples,))
fid.seek(start + size)
else:
raise ValueError("mmap=True not compatible with "
f"{bytes_per_sample}-byte container size.")
_handle_pad_byte(fid, size)
if channels > 1:
data = data.reshape(-1, channels)
return data
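# Illustrative sketch (added for clarity; the helper below is an assumption and
# is not called by this module): how the 'V1' repacking above widens one
# little-endian 3-byte PCM sample into an int32 container.  The three sample
# bytes land in the high bytes and the low byte stays zero, so the decoded
# value is the 24-bit value shifted left by 8 bits -- the "left-justified"
# behaviour described in read()'s Notes.
def _example_widen_24bit_le(raw3):
    """Sketch only: widen one little-endian 3-byte sample to a signed 32-bit value."""
    assert len(raw3) == 3
    return int.from_bytes(b'\x00' + raw3, 'little', signed=True)
# e.g. _example_widen_24bit_le(b'\x01\x00\x00') == 1 << 8
#      _example_widen_24bit_le(b'\xff\xff\xff') == -1 << 8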
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
_handle_pad_byte(fid, size)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError(f"File format {repr(str1)} not understood. Only "
"'RIFF' and 'RIFX' supported.")
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError(f"Not a WAV file. RIFF form type is {repr(str2)}.")
return file_size, is_big_endian
def _handle_pad_byte(fid, size):
# "If the chunk size is an odd number of bytes, a pad byte with value zero
# is written after ckData." So we need to seek past this after each chunk.
if size % 2:
fid.seek(1, 1)
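# Example of the rule above (added for clarity): a chunk whose ckSize is 7
# occupies 4 + 4 + 7 + 1 bytes on disk (id, size, data, one zero pad byte), so
# the following chunk id always starts on an even byte offset.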
def read(filename, mmap=False):
"""
Open a WAV file.
Return the sample rate (in samples/sec) and data from an LPCM WAV file.
Parameters
----------
filename : string or open file handle
Input WAV file.
mmap : bool, optional
Whether to read data as memory-mapped (default: False). Not compatible
with some bit depths; see Notes. Only to be used on real files.
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of WAV file.
data : numpy array
Data read from WAV file. Data-type is determined from the file;
see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
(Nsamples, Nchannels) otherwise. If a file-like input without a
C-like file descriptor (e.g., :class:`python:io.BytesIO`) is
passed, this will not be writeable.
Notes
-----
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -2147483648 +2147483392 int32
16-bit integer PCM -32768 +32767 int16
8-bit integer PCM 0 255 uint8
===================== =========== =========== =============
WAV files can specify arbitrary bit depth, and this function supports
reading any integer PCM depth from 1 to 64 bits. Data is returned in the
smallest compatible numpy int type, in left-justified format. 8-bit and
lower is unsigned, while 9-bit and higher is signed.
For example, 24-bit data will be stored as int32, with the MSB of the
24-bit data stored at the MSB of the int32, and typically the least
significant byte is 0x00. (However, if a file actually contains data past
its specified bit depth, those bits will be read and output, too. [2]_)
This bit justification and sign matches WAV's native internal format, which
allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
(so 24-bit files cannot be memory-mapped, but 32-bit can).
IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
Values exceeding [-1, +1] are not clipped.
Non-linear PCM (mu-law, A-law) is not supported.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
.. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
"Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> from scipy.io import wavfile
>>> import scipy.io
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
msg = f"Incomplete chunk ID: {repr(chunk_id)}"
# If we have the data, ignore the broken chunk
if fmt_chunk_received and data_chunk_received:
warnings.warn(msg + ", ignoring it.", WavFileWarning,
stacklevel=2)
else:
raise ValueError(msg)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
block_align = fmt_chunk[5]
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, block_align, mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in {b'JUNK', b'Fake'}:
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
def write(filename, rate, data):
"""
Write a NumPy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D NumPy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
Create a 100Hz sine wave, sampled at 44100Hz.
Write to 16-bit PCM, Mono.
    >>> import numpy as np
    >>> from scipy.io.wavfile import write
>>> samplerate = 44100; fs = 100
>>> t = np.linspace(0., 1., samplerate)
>>> amplitude = np.iinfo(np.int16).max
>>> data = amplitude * np.sin(2. * np.pi * fs * t)
>>> write("example.wav", samplerate, data.astype(np.int16))
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT.IEEE_FLOAT
else:
format_tag = WAVE_FORMAT.PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
# check data size (needs to be immediately before the data chunk)
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
# data chunk
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file.
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
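# Layout sketch of the file produced by write() (added for clarity; the byte
# values are filled in by the code above, field names follow the RIFF/WAVE
# spec):
#   b'RIFF'  <u32: file size - 8>   b'WAVE'
#   b'fmt '  <u32: 16 or 18>        wFormatTag, nChannels, nSamplesPerSec,
#                                   nAvgBytesPerSec, nBlockAlign, wBitsPerSample
#                                   (+ cbSize = 0 for floating-point data)
#   b'fact'  <u32: 4>               number of sample frames (floating-point only)
#   b'data'  <u32: data.nbytes>     interleaved samples, little-endian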
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
|
bsd-3-clause
|
schreiberx/sweet
|
benchmarks_sphere/galewsky/galewski_rk4_robert_nonlinear_T128/pp_plot_csv.py
|
1
|
2240
|
#! /usr/bin/python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
first = True
s = 2e-5
vort_contour_levels = np.append(np.arange(-1e-4, 0, s), np.arange(s, 1e-4, s))
zoom_lat = True
zoom_lat = False
zoom_lat = 'vort' in sys.argv[1]
fontsize=8
figsize=(9, 3)
for filename in sys.argv[1:]:
print(filename)
data = np.loadtxt(filename, skiprows=3)
labelsx = data[0,1:]
labelsy = data[1:,0]
data = data[1:,1:]
if zoom_lat:
while labelsy[1] < 10:
labelsy = labelsy[1:]
data = data[1:]
while labelsy[-2] > 80:
labelsy = labelsy[0:-2]
data = data[0:-2]
# while labelsx[1] < 90:
# tmplabelsx = labelsx[0]
# labelsx[0:-1] = labelsx[1:]
# labelsx[-1] = tmplabelsx
#
# tmpdata = data[:,0]
# data[:,0:-1] = data[:,1:]
# data[:,-1] = tmpdata
if first:
lon_min = labelsx[0]
lon_max = labelsx[-1]
lat_min = labelsy[0]
lat_max = labelsy[-1]
new_labelsx = np.linspace(lon_min, lon_max, 7)
new_labelsy = np.linspace(lat_min, lat_max, 7)
labelsx = np.interp(new_labelsx, labelsx, labelsx)
labelsy = np.interp(new_labelsy, labelsy, labelsy)
if first:
cmin = np.amin(data)
cmax = np.amax(data)
if 'vort' in filename:
cmin *= 1.2
cmax *= 1.2
extent = (labelsx[0], labelsx[-1], labelsy[0], labelsy[-1])
plt.figure(figsize=figsize)
plt.imshow(data, interpolation='nearest', extent=extent, origin='lower', aspect='auto')
plt.clim(cmin, cmax)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsize)
plt.title(filename, fontsize=fontsize)
if 'vort' in filename:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=vort_contour_levels, linewidths=0.5)
else:
if cmin != cmax:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5)
ax = plt.gca()
ax.xaxis.set_label_coords(0.5, -0.075)
plt.xticks(labelsx, fontsize=fontsize)
plt.xlabel("Longitude", fontsize=fontsize)
plt.yticks(labelsy, fontsize=fontsize)
plt.ylabel("Latitude", fontsize=fontsize)
#plt.show()
outfilename = filename.replace('.csv', '.png')
print(outfilename)
plt.savefig(outfilename, dpi=200)
plt.close()
first = False
|
mit
|
Aasmi/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
44
|
17033
|
import tempfile
import shutil
import os.path as op
import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
temp_folder = tempfile.mkdtemp()
try:
fpath = op.join(temp_folder, 'data.pkl')
joblib.dump(splitted_data, fpath)
X_train, X_test, y_train, y_test = joblib.load(fpath, mmap_mode='r')
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
finally:
# try to release the mmap file handle in time to be able to delete
# the temporary folder under windows
del X_train, X_test, y_train, y_test
try:
shutil.rmtree(temp_folder)
        except OSError:
warnings.warn("Could not delete temporary folder %s" % temp_folder)
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py
|
23
|
1485
|
#===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
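# Usage sketch (added for clarity; not part of the original module, and it
# assumes a matplotlib Axes is at hand):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.yaxis.set_major_formatter(UnitDblFormatter(useOffset=False))
#
# Tick values are then formatted via str(), so UnitDbl instances keep their
# unit string instead of being coerced to plain floats.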
|
gpl-3.0
|
nrhine1/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
130
|
22974
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # test that this works with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
    # now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
|
bsd-3-clause
|
drsstein/PyRat
|
Examples/LogisticRegressionModular.py
|
1
|
2349
|
# short lecture on learning logistic neurons by Geoffrey Hinton:
# https://www.youtube.com/watch?v=--_F0rbPH9M
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, "../..")
import PyRat as pr
def run():
#generate n samples of random 2d data from each class
n = 100
x_pos = np.random.randn(2, n)
y_pos = np.ones(n)
x_neg = np.random.randn(2, n)
y_neg = np.zeros(n)
#shift negative data to center around m_neg
m_neg = np.array([2, 1]).reshape(2, -1)
x_neg += m_neg
#concatenate positive and negative examples
x = np.concatenate((x_pos, x_neg), axis=1)
y = np.concatenate((y_pos, y_neg))
n_samples = len(x[0])
#set y as 't'arget
t = y.reshape(1, -1)
#define number of training epochs (look at each example once per epoch)
n_epochs = 200
learning_rate = 10.
#initialize layer of logistic units with a single unit
n_dims = len(x)
h0 = pr.logistic(1, n_dims)
rmse = np.zeros(n_epochs)
#train network
for i in range(n_epochs):
y = h0.forward(x)
rmse[i], dEdy = pr.squared_error(t, y)
h0.backprop(dEdy, learning_rate)
#print error and weights at end of each iteration
print "RMSE:" + str(rmse[i]) + " w: " + str(h0.w)
#plot data
plt.figure(figsize=(20, 9))
plt.subplot(121)
plt.title('Separation in feature space')
plt.scatter(x_pos[0,:], x_pos[1,:], color='blue', marker='+')
plt.scatter(x_neg[0,:], x_neg[1,:], color='red', marker='o')
#separation line at p=0.5 is defined as x2 = -w1/w2 * x1 - w0/w2
w0 = h0.w[n_dims]
w = h0.w
b = -w0/w[1]
a = -w[0]/w[1]
l_x = np.linspace(-3, 4, 2)
l_y = -w[0]/w[1] * l_x -w0/w[1]
#separation line at probability p=0.25
l_y1 = -w[0]/w[1] * l_x - (w0 + np.log(1/0.25-1))/w[1]
#separation line at probability p=0.75
l_y2 = -w[0]/w[1] * l_x - (w0 + np.log(1/0.75-1))/w[1]
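    # Derivation sketch (added for clarity): the unit outputs
    #     p = sigmoid(w0 + w1*x1 + w2*x2),
    # and solving sigmoid(z) = c for the boundary gives
    #     x2 = -(w1/w2)*x1 - (w0 + log(1/c - 1))/w2.
    # For c = 0.5 the log term vanishes (solid line below); c = 0.25 and c = 0.75
    # shift that same line by -log(3)/w2 and +log(3)/w2 respectively (dashed lines).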
#plot separation lines
plt.plot(l_x, l_y, color='black')
plt.plot(l_x, l_y1, linestyle='dashed', color='red')
plt.plot(l_x, l_y2, linestyle='dashed', color='blue')
#plot mean error over iterations
e_x = np.linspace(1, n_epochs, n_epochs)
plt.subplot(122)
plt.title('Root mean squared error')
plt.plot(e_x, rmse)
plt.show()
|
mit
|
sivakasinathan/incubator
|
simulate_sequence/simulate_sequence.py
|
1
|
6309
|
from __future__ import division
from Bio import SeqIO
from Bio.Seq import Seq
import argparse
import glob
import os.path as op
import os
import numpy as np
import pandas as pd
import StringIO
# TO DO:
# - The mutate function should be changed such that it uses a more sophisticated mutation model and also allows indels
# - Currently, the transition probabilities between different array types are hard-coded; it would be better to use a MLE approach to get these probabilities from the PacBio data; or, alternatively, allow these to be set at runtime
# - Similarly, the inversion probability is hard-coded; might want to add a way to set this at runtime or estimate it from the data
# - Currently, output is to STDOUT... might be good to add a way to write to a specified file
# - The inter-higher-order-repeat divergence is also hard-coded
# - Increase the mutation rate
def mutate(seq,nMut):
"""
Given a nucleotide sequence, introduce random substitutions using a naive mutation model.
Parameters
----------
seq : str
DNA sequence to be mutated
nMut : int
Number of mutations
Returns
-------
mutSeq : str
Mutated version of the original sequence
"""
mutSeq = list(seq)
seqLen = len(seq)
    # pick nMut distinct positions to substitute
    mutPos = np.random.choice(range(0, seqLen), size=nMut, replace=False)
    for n in mutPos:
        randBase = mutSeq[n]
        # redraw until the new base differs from the current one
        while randBase == mutSeq[n]:
            randBase = np.random.choice(['A', 'C', 'G', 'T'])
        mutSeq[n] = randBase
    return "".join(mutSeq)
def simulateHOR(seqObjList,periodicity):
"""
Given a list of BioPython sequence objects and a desired periodicity,
generate the higher order repeat (HOR) monomer from the library of
sequence objects
Parameters
----------
seqObjList : list of BioPython sequence objects
Library of monomeric sequences from which to construct the HOR
periodicity : int
periodicity of the HOR
Returns
-------
horSeq : str
Sequence of the HOR of desired periodicity
monoLens: list
List of lengths of monomers that comprise horSeq
"""
monoList = []
monoList = [ str(seqObj.seq) for seqObj in np.random.choice(seqObjList,size=periodicity,replace=False) ]
monoLens = [ len(mono) for mono in monoList ]
horSeq = "".join(monoList)
return (horSeq, monoLens)
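# Usage sketch (added for clarity; 'monomers.fa' is a hypothetical input file):
#   seqObjList = list(SeqIO.parse("monomers.fa", "fasta"))
#   horSeq, monoLens = simulateHOR(seqObjList, 4)
#   # horSeq concatenates 4 distinct monomers; sum(monoLens) == len(horSeq)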
def simulateRefSeq(faFn,len,periods,inv=False, bedOutFile=None, fastaOutFile=None):
"""
Simulate a 'reference' sequence composed of higher-order periodicities of monomeric repeat units.
Parameters
----------
faFn : str
FASTA file containing a library of monomers from which to sample higher-order periodicities
len : int
Approximate maximum length of output sequence measured in terms of number of monomer units (NOT base-pair length)
periods : str
Comma-separated list of higher-order periodicities to include in the final reference sequence
inv : bool
Include inversions? (Default: False)
bedOutFile : str
File to which coords of alpha satellite monomers in reference sequence should be written. (Default: None)
fastaOutFile : str
File to which reference sequence should be written. (Default: None)
Returns
-------
refSeq : str
Simulated reference sequence
bedDf : pd.DataFrame
DataFrame containing coordinates of alpha satellite monomers in reference sequence
"""
# Probability of transitioning from one array periodicity to another
transitP=0.05
# Probability of inverting a given array
invertP=0.01
# Get list of BioPython sequence objects stored in the FASTA file
faHandle = open(faFn, "rU")
seqObjList = list(SeqIO.parse(faHandle, "fasta"))
faHandle.close()
perList = [int(p) for p in periods.split(',')]
# Get base HOR sequences and store in dict keyed on periodicity
hors={}
lens={}
for p in perList:
(s,seql) =simulateHOR(seqObjList,p)
hors[p] = s
lens[p] = seql
# Make the reference sequence
seqName="RefSeq_len"+str(len)+"_pers_"+periods
unitCtr = 0
prevPer = np.random.choice(perList)
currPer = prevPer
refSeqL = []
start = 0
lines = []
strand='+'
currInvert=0
while unitCtr < len:
transit = np.random.choice([0,1],p=[1-transitP,transitP])
if transit:
while currPer==prevPer:
currPer = int(np.random.choice(perList))
newHor = hors[currPer]
if inv==True:
currInvert = np.random.choice([0,1],p=[1-invertP,invertP])
if currInvert==1:
if strand == '+':
strand='-'
else:
strand='+'
if strand == '-':
newHor = str(Seq(newHor).reverse_complement())
refSeqL += list(newHor)
for monoLen in lens[currPer]:
lines.append(seqName + '\t' + str(start) + '\t' + str(start+monoLen) + '\t' + str(currPer) + '\t.\t' + strand +'\t.\t.\t.')
start+=monoLen
prevPer = currPer
unitCtr = unitCtr + currPer
refSeq = "".join(refSeqL)
sio = StringIO.StringIO('\n'.join(lines))
bedDf = pd.read_csv(sio, delimiter='\t', header=None)
bedDf.columns = ['chrom','start','end','mono_class','score','strand','env_st','env_en','p_val']
if bedOutFile:
fh = open(bedOutFile, 'w')
fh.write('\n'.join(lines))
fh.close()
if fastaOutFile:
fa = open(fastaOutFile,'w')
fa.write('>'+seqName+'\n')
fa.write(refSeq)
fa.close()
return refSeq, bedDf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('seqs', help='FASTA file containing DNA sequences of monomeric units', type=str)
parser.add_argument('len', help='Max. number of monomeric units in output', type=int)
parser.add_argument('periods', help='Comma-separated list of periodicities to sample', type=str)
parser.add_argument('--inv', help='Include inversions in final output', action="store_true")
args = parser.parse_args()
simulateRefSeq(args.seqs,args.len,args.periods,args.inv,fastaOutFile='trial.fa',bedOutFile='trial.bed')
|
mit
|
sergpolly/Thermal_adapt_scripts
|
BOOTSTRAPS/Cherry_composition_analysis_Thermo_Rnd.py
|
2
|
5786
|
import pandas as pd
import os
import subprocess as sub
import re
import sys
from Bio import SeqUtils
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
# ['DbxRefs','Description','FeaturesNum','GenomicID','GenomicLen','GenomicName','Keywords','NucsPresent','Organism_des',
# 'SourceDbxRefs','SourceOrganism','SourcePlasmid','SourceStrain','Taxonomy','BioProject','TaxonID','Organism_env',
# 'OptimumTemperature','TemperatureRange','OxygenReq','Habitat','Salinity','crit_NC','crit_WGS','crit_genlen',
# 'crit_features','crit_comp_genome','crit_plasmid']
env_dat = pd.read_csv(os.path.join(path,"env_catalog_compgenome.dat"))
#['GenomicID','cDNA','fid','pid','product','protein','status','table','ribosomal','CAI','TrOp']
gen_dat = pd.read_csv(os.path.join(path,"complete_CDS_CAI_DNA_Rnd.dat"))
# PROTEOME LEVEL AMINO ACID FREQUENCIES ...
# "proteome_all.dat"
# # file with the organisms of interest
# dat_fname = os.path.join(bib2_scr_path,'catalog_with_accesion.dat')
# dat = pd.read_csv(dat_fname)
aacids = sorted(list('CMFILVWYAGTSNQDEHRKP'))
cost_vec_path = path
akashi = os.path.join(cost_vec_path,'akashi-cost.d')
argentina = os.path.join(cost_vec_path,'argentina-cost.d')
akashi_cost = pd.read_csv(akashi,header=None,sep=' ')
argentina_cost = pd.read_csv(argentina,header=None,sep=' ')
thermo_freq = pd.read_csv(os.path.join(path,'thermo.dat'),header=None,sep=' ')
akashi_cost.set_index(0,inplace=True)
argentina_cost.set_index(0,inplace=True)
thermo_freq.set_index(0,inplace=True)
akashi_cost.sort_index(inplace=True)
argentina_cost.sort_index(inplace=True)
thermo_freq.sort_index(inplace=True)
#
gen_dat_org = gen_dat.groupby('GenomicID')
# genom_id = orgs.groups.keys() # env_dat['GenomicID'] ...
# gen_dat_grouped.get_group(idx)
#
# how to get quantile ...
# q75 = pid_cai['CAI'].quantile(q=0.75)
#
#
num_of_quantiles = 5
#
stat_dat = {'GenomicID':[],
'OptimumTemperature':[],
'TrOp':[]}
for i in range(num_of_quantiles):
stat_dat['q%d'%i] = []
stat_dat['R20_q%d'%i] = []
stat_dat['Akashi_q%d'%i] = []
#
#
for idx,topt in env_dat[['GenomicID','OptimumTemperature']].itertuples(index=False):
cds_cai_dat = gen_dat_org.get_group(idx)
# is it a translationally optimized organism ?
# use names that do not shadow the built-in all/any
trop_all,trop_any = cds_cai_dat['TrOp'].all(),cds_cai_dat['TrOp'].any()
if trop_all == trop_any:
trans_opt = trop_all
else: #trop_any != trop_all
print "%s@T=%f: something is wrong: the TrOp flag is not the same for all CDS ..."%(idx,topt)
# THIS IS just a stupid precaution measure, in case we messed something upstream ...
# not that stupid after all, because NaN is behaving badly here ...
if cds_cai_dat['TrOp'].notnull().all():
#
# we can use this 'qcut' function from pandas to divide our proteins by the quantiles ...
category,bins = pd.qcut(cds_cai_dat['CAI'],q=num_of_quantiles,retbins=True,labels=False)
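# illustration: pd.qcut(pd.Series([1,2,3,4,5]),q=5,labels=False) -> array([0, 1, 2, 3, 4]),
# i.e. each protein gets a bin label from 0 (lowest CAI) to q-1 (highest CAI);
# retbins=True additionally returns the bin edges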
#
stat_dat['GenomicID'].append(idx)
stat_dat['OptimumTemperature'].append(topt)
stat_dat['TrOp'].append(trans_opt)
#
# then we could iterate over proteins/cDNAs in these categories ...
for cat in range(num_of_quantiles):
cds_cai_category = cds_cai_dat[category==cat]
total_length = cds_cai_category['protein'].str.len().sum()
IVYWREL = sum(cds_cai_category['protein'].str.count(aa).sum() for aa in list('IVYWREL'))
# IVYWREL = cds_cai_category['protein'].str.count('|'.join("IVYWREL")).sum() # tiny bit slower ...
f_IVYWREL = float(IVYWREL)/float(total_length)
# 20-vector of amino acid composition ...
aa_freq_20 = np.true_divide([cds_cai_category['protein'].str.count(aa).sum() for aa in aacids],float(total_length))
# slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
_1,_2,R20,_4,_5 = stats.linregress(aa_freq_20, thermo_freq[1])
# Akashi ...
cost = np.dot(aa_freq_20,akashi_cost[1])
# appending ...
#
#
stat_dat['q%d'%cat].append(f_IVYWREL)
stat_dat['R20_q%d'%cat].append(R20)
stat_dat['Akashi_q%d'%cat].append(cost)
#
#
#
cai_stats_quant = pd.DataFrame(stat_dat)
#
cai_stats_quant_TrOp = cai_stats_quant[cai_stats_quant.TrOp]
cai_stats_quant_noTrOp = cai_stats_quant[~cai_stats_quant.TrOp]
plt.clf()
bins = np.linspace(-0.05,0.05,50)
# plt.hist(list(cai_stats_quant_TrOp.q4 - cai_stats_quant_TrOp.q1),bins=bins,color='blue')
plt.hist(list(cai_stats_quant.q4 - cai_stats_quant.q1),bins=bins,color='red',alpha=0.8)#,cumulative=True)
# plt.show()
plt.savefig("IVYWREL_quantile_hist.png")
plt.clf()
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q1,'bo',alpha=0.8)
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q4,'ro',alpha=0.8)
# plt.show()
plt.savefig("IVYWREL_dots_compare.png")
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].mean(),yerr=cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].std(),fmt='o')
plt.xlim(0,6)
plt.show()
# R20 is flat on average (strange bi-modality?!)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| ~~+ ~~-
# Akashi is flat on average (strange local minimum at middle CAI quantile)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| ~ ~
# IVYWREL is declining on average (?!)
# | meso thermo
# ------+-------------
# TrOp | NA NA
# noTrOp| -- --
|
mit
|
endlessm/chromium-browser
|
tools/perf/cli_tools/soundwave/worker_pool_test.py
|
10
|
1670
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
try:
import sqlite3
except ImportError:
pass
import tempfile
import unittest
from cli_tools.soundwave import pandas_sqlite
from cli_tools.soundwave import worker_pool
from core.external_modules import pandas
from telemetry import decorators
def TestWorker(args):
con = sqlite3.connect(args.database_file)
def Process(item):
# Add item to the database.
df = pandas.DataFrame({'item': [item]})
df.to_sql('items', con, index=False, if_exists='append')
worker_pool.Process = Process
@unittest.skipIf(pandas is None, 'pandas not available')
class TestWorkerPool(unittest.TestCase):
@decorators.Disabled('all') # crbug.com/939777
def testWorkerPoolRun(self):
tempdir = tempfile.mkdtemp()
try:
args = argparse.Namespace()
args.database_file = os.path.join(tempdir, 'test.db')
args.processes = 3
schema = pandas_sqlite.DataFrame([('item', int)])
items = range(20) # We'll write these in the database.
con = sqlite3.connect(args.database_file)
try:
pandas_sqlite.CreateTableIfNotExists(con, 'items', schema)
with open(os.devnull, 'w') as devnull:
worker_pool.Run(
'Processing:', TestWorker, args, items, stream=devnull)
df = pandas.read_sql('SELECT * FROM items', con)
# Check all of our items were written.
self.assertItemsEqual(df['item'], items)
finally:
con.close()
finally:
shutil.rmtree(tempdir)
|
bsd-3-clause
|
DonBeo/statsmodels
|
statsmodels/tsa/filters/hp_filter.py
|
27
|
3507
|
from __future__ import absolute_import
from scipy import sparse
from scipy.sparse import dia_matrix, eye as speye
from scipy.sparse.linalg import spsolve
import numpy as np
from ._utils import _maybe_get_pandas_wrapper
def hpfilter(X, lamb=1600):
"""
Hodrick-Prescott filter
Parameters
----------
X : array-like
The 1d ndarray timeseries to filter of length (nobs,) or (nobs,1)
lamb : float
The Hodrick-Prescott smoothing parameter. A value of 1600 is
suggested for quarterly data. Ravn and Uhlig suggest using a value
of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
data.
Returns
-------
cycle : array
The estimated cycle in the data given lamb.
trend : array
The estimated trend in the data given lamb.
Examples
---------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> dates = sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3')
>>> index = pd.DatetimeIndex(dates)
>>> dta.set_index(index, inplace=True)
>>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600)
>>> gdp_decomp = dta[['realgdp']]
>>> gdp_decomp["cycle"] = cycle
>>> gdp_decomp["trend"] = trend
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax,
... fontsize=16);
>>> plt.show()
.. plot:: plots/hpf_plot.py
Notes
-----
The HP filter removes a smooth trend, `T`, from the data `X`, by solving
min_T sum_t ((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
Here we implement the HP filter as a ridge regression using
scipy.sparse. In this sense, the solution can be written as
T = inv(I + lamb*K'K)X
where I is a nobs x nobs identity matrix, and K is the (nobs-2) x nobs
second-difference matrix such that
K[i,j] = 1 if j == i or j == i + 2
K[i,j] = -2 if j == i + 1
K[i,j] = 0 otherwise
References
----------
Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
Empirical Investigation." `Carnegie Mellon University discussion
paper no. 451`.
Ravn, M.O. and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
Filter for the Frequency of Observations." `The Review of Economics and
Statistics`, 84(2), 371-80.
"""
_pandas_wrapper = _maybe_get_pandas_wrapper(X)
X = np.asarray(X, float)
if X.ndim > 1:
X = X.squeeze()
nobs = len(X)
I = speye(nobs,nobs)
offsets = np.array([0,1,2])
data = np.repeat([[1.],[-2.],[1.]], nobs, axis=1)
K = dia_matrix((data, offsets), shape=(nobs-2,nobs))
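# Illustration (comment only): for nobs = 5 the second-difference matrix K is
# [[ 1. -2. 1. 0. 0.]
# [ 0. 1. -2. 1. 0.]
# [ 0. 0. 1. -2. 1.]]
# so row t of K*T equals T[t] - 2*T[t+1] + T[t+2], the second-difference penalty in the docstring.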
import scipy
if (X.dtype != np.dtype('<f8') and
int(scipy.__version__[:3].split('.')[1]) < 11):
#scipy umfpack bug on Big Endian machines, will be fixed in 0.11
use_umfpack = False
else:
use_umfpack = True
if scipy.__version__[:3] == '0.7':
#doesn't have use_umfpack option
#will be broken on big-endian machines with scipy 0.7 and umfpack
trend = spsolve(I+lamb*K.T.dot(K), X)
else:
trend = spsolve(I+lamb*K.T.dot(K), X, use_umfpack=use_umfpack)
cycle = X-trend
if _pandas_wrapper is not None:
return _pandas_wrapper(cycle), _pandas_wrapper(trend)
return cycle, trend
|
bsd-3-clause
|
JavierGarciaD/Algorithmic_Thinking
|
src/project_2.py
|
1
|
3172
|
'''
Created on 10/09/2014
@author: javgar119
'''
# General imports
import networkx as nx
from collections import deque
from pprint import pprint
import matplotlib.pyplot as plt
from hw_1 import er_algorithm
# from app_2 import *
def bfs_visited(ugraph, start_node):
"""
Takes the undirected graph ugraph and the node start_node and returns
the set consisting of all nodes that are visited by a breadth-first
search that starts at start_node.
:param ugraph: dictionary representing a graph
:param start_node: node to search
"""
my_queue = deque([start_node])
visited = set([start_node])
while 0 < len(my_queue):
node = my_queue.popleft()
for each_neighbor in ugraph[node]:
if each_neighbor not in visited:
visited.add(each_neighbor)
my_queue.append(each_neighbor)
return visited
def cc_visited(ugraph):
"""
Takes the undirected graph ugraph and returns a list of sets, where each set
consists of all the nodes (and nothing else) in a connected component, and
there is exactly one set in the list for each connected component in ugraph
and nothing else.
:param ugraph:dictionary representing a graph
"""
nodes = list(ugraph.keys())
ccomponent = []
while nodes:
current_node = nodes.pop()
neighbors = bfs_visited(ugraph, current_node)
ccomponent.append(neighbors)
for item in neighbors:
if item in nodes:
nodes.remove(item)
return ccomponent
def largest_cc_size(ugraph):
"""
Takes the undirected graph ugraph and returns the size (an integer) of the
largest connected component in ugraph
:param ugraph:
"""
ccomponent = cc_visited(ugraph)
result = 0
for each_component in ccomponent:
result = max(result, len(each_component))
return result
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def compute_resilience(ugraph, attack_order):
"""
Takes the undirected graph ugraph, a list of nodes attack_order and iterates
through the nodes in attack_order. For each node in the list, the function
removes the given node and its edges from the graph and then computes the
size of the largest connected component for the resulting graph.
:param ugraph:
:param attack_order:
"""
result = [largest_cc_size(ugraph)]
for attack in attack_order:
# delete node references in other nodes
for key, val in ugraph.items():
if attack in val:
ugraph[key].remove(attack)
# delete node
del ugraph[attack]
largest = largest_cc_size(ugraph)
result.append(largest)
return result
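# Minimal illustrative example (hypothetical graph, not part of the assignment data):
# EXAMPLE_GRAPH = {0: set([1, 2]), 1: set([0]), 2: set([0]), 3: set()}
# bfs_visited(EXAMPLE_GRAPH, 0) -> set([0, 1, 2])
# largest_cc_size(EXAMPLE_GRAPH) -> 3
# compute_resilience(copy_graph(EXAMPLE_GRAPH), [0]) -> [3, 1]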
if __name__ == '__main__':
er = er_algorithm(1347, 0.0034328666090845212)
upa = upa_algorith(1347, 5)
print('er: ',largest_cc_size(er))
print('upa: ',largest_cc_size(upa))
|
gpl-3.0
|
zorojean/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
|
bsd-3-clause
|
python-control/python-control
|
examples/robust_mimo.py
|
2
|
5219
|
"""robust_mimo.py
Demonstrate mixed-sensitivity H-infinity design for a MIMO plant.
Based on Example 3.8 from Multivariable Feedback Control, Skogestad and Postlethwaite, 1st Edition.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from control import tf, ss, mixsyn, step_response
def weighting(wb, m, a):
"""weighting(wb,m,a) -> wf
wb - design frequency (where |wf| is approximately 1)
m - high frequency gain of 1/wf; should be > 1
a - low frequency gain of 1/wf; should be < 1
wf - SISO LTI object
"""
s = tf([1, 0], [1])
return (s/m + wb) / (s + wb*a)
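# Example sketch (values chosen only for illustration): a weight with crossover near 0.25 rad/s,
# i.e. (s/1.5 + 0.25)/(s + 2.5e-5):
# w_demo = weighting(wb=0.25, m=1.5, a=1e-4)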
def plant():
"""plant() -> g
g - LTI object; 2x2 plant with a RHP zero, at s=0.5.
"""
den = [0.2, 1.2, 1]
gtf = tf([[[1], [1]],
[[2, 1], [2]]],
[[den, den],
[den, den]])
return ss(gtf)
# as of this writing (2017-07-01), python-control doesn't have an
# equivalent to Matlab's sigma function, so use a trivial stand-in.
def triv_sigma(g, w):
"""triv_sigma(g,w) -> s
g - LTI object, order n
w - frequencies, length m
s - (m,n) array of singular values of g(1j*w)"""
m, p, _ = g.frequency_response(w)
sjw = (m*np.exp(1j*p)).transpose(2, 0, 1)
sv = np.linalg.svd(sjw, compute_uv=False)
return sv
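# Illustrative use (assumed frequency grid, not part of the example's flow):
# w_demo = np.logspace(-1, 1, 3)
# triv_sigma(plant(), w_demo) # -> (3, 2) array: two singular values at each frequency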
def analysis():
"""Plot open-loop responses for various inputs"""
g = plant()
t = np.linspace(0, 10, 101)
_, yu1 = step_response(g, t, input=0, squeeze=True)
_, yu2 = step_response(g, t, input=1, squeeze=True)
# linear system, so scale and sum previous results to get the
# [1,-1] response
yuz = yu1 - yu2
plt.figure(1)
plt.subplot(1, 3, 1)
plt.plot(t, yu1[0], label='$y_1$')
plt.plot(t, yu1[1], label='$y_2$')
plt.xlabel('time')
plt.ylabel('output')
plt.ylim([-1.1, 2.1])
plt.legend()
plt.title('o/l response\nto input [1,0]')
plt.subplot(1, 3, 2)
plt.plot(t, yu2[0], label='$y_1$')
plt.plot(t, yu2[1], label='$y_2$')
plt.xlabel('time')
plt.ylabel('output')
plt.ylim([-1.1, 2.1])
plt.legend()
plt.title('o/l response\nto input [0,1]')
plt.subplot(1, 3, 3)
plt.plot(t, yuz[0], label='$y_1$')
plt.plot(t, yuz[1], label='$y_2$')
plt.xlabel('time')
plt.ylabel('output')
plt.ylim([-1.1, 2.1])
plt.legend()
plt.title('o/l response\nto input [1,-1]')
def synth(wb1, wb2):
"""synth(wb1,wb2) -> k,gamma
wb1: S weighting frequency
wb2: KS weighting frequency
k: controller
gamma: H-infinity norm of 'design', that is, of evaluation system
with loop closed through design
"""
g = plant()
wu = ss([], [], [], np.eye(2))
wp1 = ss(weighting(wb=wb1, m=1.5, a=1e-4))
wp2 = ss(weighting(wb=wb2, m=1.5, a=1e-4))
wp = wp1.append(wp2)
k, _, info = mixsyn(g, wp, wu)
return k, info[0]
def step_opposite(g, t):
"""reponse to step of [-1,1]"""
_, yu1 = step_response(g, t, input=0, squeeze=True)
_, yu2 = step_response(g, t, input=1, squeeze=True)
return yu1 - yu2
def design():
"""Show results of designs"""
# equal weighting on each output
k1, gam1 = synth(0.25, 0.25)
# increase "bandwidth" of output 2 by moving crossover weighting frequency 100 times higher
k2, gam2 = synth(0.25, 25)
# now weight output 1 more heavily
# won't plot this one, just want gamma
_, gam3 = synth(25, 0.25)
print('design 1 gamma {:.3g} (Skogestad: 2.80)'.format(gam1))
print('design 2 gamma {:.3g} (Skogestad: 2.92)'.format(gam2))
print('design 3 gamma {:.3g} (Skogestad: 6.73)'.format(gam3))
# do the designs
g = plant()
w = np.logspace(-2, 2, 101)
I = ss([], [], [], np.eye(2))
s1 = I.feedback(g*k1)
s2 = I.feedback(g*k2)
# frequency response
sv1 = triv_sigma(s1, w)
sv2 = triv_sigma(s2, w)
plt.figure(2)
plt.subplot(1, 2, 1)
plt.semilogx(w, 20*np.log10(sv1[:, 0]), label=r'$\sigma_1(S_1)$')
plt.semilogx(w, 20*np.log10(sv1[:, 1]), label=r'$\sigma_2(S_1)$')
plt.semilogx(w, 20*np.log10(sv2[:, 0]), label=r'$\sigma_1(S_2)$')
plt.semilogx(w, 20*np.log10(sv2[:, 1]), label=r'$\sigma_2(S_2)$')
plt.ylim([-60, 10])
plt.ylabel('magnitude [dB]')
plt.xlim([1e-2, 1e2])
plt.xlabel('freq [rad/s]')
plt.legend()
plt.title('Singular values of S')
# time response
# in design 1, both outputs have an inverse initial response; in
# design 2, output 2 does not, and is very fast, while output 1
# has a larger initial inverse response than in design 1
time = np.linspace(0, 10, 301)
t1 = (g*k1).feedback(I)
t2 = (g*k2).feedback(I)
y1 = step_opposite(t1, time)
y2 = step_opposite(t2, time)
plt.subplot(1, 2, 2)
plt.plot(time, y1[0], label='des. 1 $y_1(t))$')
plt.plot(time, y1[1], label='des. 1 $y_2(t))$')
plt.plot(time, y2[0], label='des. 2 $y_1(t))$')
plt.plot(time, y2[1], label='des. 2 $y_2(t))$')
plt.xlabel('time [s]')
plt.ylabel('response [1]')
plt.legend()
plt.title('c/l response to reference [1,-1]')
if __name__ == "__main__":
analysis()
design()
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
|
bsd-3-clause
|
butala/pyrsss
|
pyrsss/mag/process_hdf.py
|
1
|
13040
|
from __future__ import division
import sys
import logging
import warnings
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from datetime import timedelta
from itertools import groupby
from collections import OrderedDict
import numpy as NP
import pandas as PD
import scipy.signal
from iaga2hdf import read_hdf, write_hdf
from ..util.nan import nan_interp
from ..signal.lfilter import lp_fir_filter
from ..stats.stats import despike
logger = logging.getLogger('pyrsss.mag.process_hdf')
"""
A NOTE ON THE COORDINATE FRAME
The following describes the XYZ convention
(from https://github.com/usgs/geomag-algorithms/blob/master/docs/algorithms/XYZ.md):
- X is the magnitude of the geographic north pole component of the H vector;
- Y is the magnitude of the east component of the H vector;
- Z is the downward component of the geomagnetic field, same as before.
"""
def minute_interval_filter_firwin():
"""
Synthesize and return impulse response of minute interval filter
for analysis of magnetometer data. Design a filter using the
window method. The filter is designed to stop from 0 to 0.75 mHz
and pass from 1 mHz to Nyquist.
For reference: this is the filter used to process the data for the
Space Weather 2017 paper.
fs_1m = 1./60
bands_1m = [0, 1e-3 - 0.25e-3, 1e-3, fs_1m/2]
desired_1m = [0, 1]
_ = fir_response(h_1m, bands_1m, desired_1m, Hz=fs_1m)
FIR filter is linear phase type 1
band 0 abs deviations from 0:
min = 4.466e-05 (db=-87.000846)
med = -4.218e-03
max = 3.027e-02 (db=-30.379491
band 1 abs deviations from 1:
min = 4.412e-06 (db=-107.106525)
med = 2.398e-03
std = 4.059e-03
max = 2.982e-02 (db=-30.509428
"""
Ts = 60
fs = 1. / Ts
fn = fs / 2
width = 0.25e-3
cutoff = 1e-3
ripple = 30
numtaps, beta = scipy.signal.kaiserord(ripple, width / fn)
if numtaps % 2 == 0:
numtaps += 1
return scipy.signal.firwin(numtaps,
cutoff - width/2,
width=width,
pass_zero=False,
nyq=fn)
def minute_interval_filter(N_remez=201):
"""
Synthesize and return impulse response of minute interval filter
for analysis of magnetometer data. Design a length *N_remez*
min-max optimal filter. The filter is designed to stop from 0 to
0.75 mHz and pass from 1 mHz to Nyquist.
fs_1m = 1./60
bands_1m = [0, 1e-3 - 0.25e-3, 1e-3, fs_1m/2]
desired_1m = [0, 1]
_ = fir_response(h_1m, bands_1m, desired_1m, Hz=fs_1m)
FIR filter is linear phase type 1
band 0 abs deviations from 0:
min = 1.487e-05 (db=-96.553225)
med = -1.102e-03
max = 1.569e-03 (db=-56.089238
band 1 abs deviations from 1:
min = 1.018e-06 (db=-119.848775)
med = -1.066e-05
std = 4.820e-04
max = 1.571e-03 (db=-56.074809
"""
return scipy.signal.remez(N_remez,
[0, 0.75e-3, 1e-3, 1./60/2],
[0, 1],
[1, 1],
Hz=1./60)
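# Usage sketch (hypothetical 1-minute series x), applied with the same FIR helper used below:
# h = minute_interval_filter()
# x_filtered = lp_fir_filter(h, x, mode='valid')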
def second_interval_filter(N_remez=4001):
"""
Synthesize and return impulse response of second interval filter
for analysis of magnetometer data. Design a length *N_remez*
min-max optimal filter. The filter is designed to stop from 0 to
0.25 mHz, pass from 1 to 100 mHz, and stop from 101 mHz to
Nyquist.
fs_1s = 1.
bands_1s = [0, 1e-3 - 0.25e-3,
1e-3, 100e-3,
101e-3, fs_1s/2]
desired_1s = [0, 1, 0]
_ = fir_response(h_1s, bands_1s, desired_1s, Hz=fs_1s)
FIR filter is linear phase type 1
band 0: 0 Hz -- 750 \muHz
abs deviations from 0 statistics:
min = 6.872e-05 (db=-83.257739)
med = -4.906e-02
max = 7.486e-01 (db=-2.514808)
band 1: 1 mHz -- 100 mHz
abs deviations from 1 statistics:
min = 2.323e-07 (db=-132.678419)
med = -6.093e-07
std = 5.871e-04
max = 2.349e-03 (db=-52.584106)
band 2: 101 mHz -- 500 mHz
abs deviations from 0 statistics:
min = 1.430e-07 (db=-136.893586)
med = -1.353e-03
max = 2.853e-03 (db=-50.895031)
"""
return scipy.signal.remez(N_remez,
[0, 0.25e-3, 1e-3, 100e-3, 101e-3, 0.5],
[0, 1, 0],
[1, 1, 1],
Hz=1)
def consecutive_nans(x, y):
"""
Return the maximum number of nans encountered in the logical or of
the *x* and *y* time series.
"""
nan = NP.isnan(x) | NP.isnan(y)
lengths = []
for key, group in groupby(nan):
if key == True:
lengths.append(len(list(group)))
if lengths:
return max(lengths)
return 0
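# Illustrative example (made-up arrays):
# x = NP.array([1.0, NP.nan, NP.nan, 4.0])
# y = NP.array([NP.nan, 2.0, NP.nan, 4.0])
# consecutive_nans(x, y) # -> 3 (positions 0-2 are NaN in x or y)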
def process_timeseries(dt,
Bx,
By,
c1='B_X',
c2='B_Y',
despike_data=True,
remove_mean=True):
"""
Process surface magnetic field measurement time series with
indices *dt* and components *Bx* and *By*. Output a
:class:`DataFrame` with columns *c1* and *c2* associated with the
processed output time series. If *despike_data*, remove outliers
prior to filtering. If *remove_mean*, remove the mean from the
output time series.
"""
warnings.warn('use process_df instead',
PendingDeprecationWarning)
n = consecutive_nans(Bx, By)
interval = (dt[1] - dt[0]).total_seconds()
if n > 0:
logger.warning('longest contiguous gap = {:.2f} minutes'.format(n * interval / 60))
# fill data gaps via linear interpolation
Bx = nan_interp(Bx)
By = nan_interp(By)
if despike_data:
# remove outliers
df = PD.DataFrame(index=dt,
data={'Bx': Bx,
'By': By})
df = despike(df)
dt = df.index.to_pydatetime()
Bx = df.Bx.values
By = df.By.values
# apply 1 - 100 mHz bandpass filter
if interval == 1.0:
h = second_interval_filter()
elif interval == 60.0:
h = minute_interval_filter()
else:
raise ValueError('1 to 100 mHz filter not yet synthesized for {} s interval data'.format(interval))
Bx_filtered, dt_filtered = lp_fir_filter(h, Bx, mode='valid', index=dt)
By_filtered = lp_fir_filter(h, By, mode='valid')
# remove mean
if remove_mean:
Bx_filtered -= NP.mean(Bx_filtered)
By_filtered -= NP.mean(By_filtered)
# build DataFrame and store to disk
return PD.DataFrame(index=dt_filtered,
data={c1: Bx_filtered,
c2: By_filtered})
def process(hdf_fname,
source_key='B_raw',
key='B',
he=False,
despike_data=True,
remove_mean=True):
"""
Process the magnetic field columns of *hdf_fname*, applying
pre-processing (nan interpolation) and a band-pass filter. Look
for input at *source_key* and store output under identifier
*key*. If *he*, process the H and E magnetic field components. If
*remove_mean*, remove the mean from each column.
"""
logger.info('processing {}'.format(hdf_fname))
df_raw, header = read_hdf(hdf_fname, source_key)
dt = df_raw.index.to_pydatetime()
Bx_raw = df_raw['B_X'].values * 1e-9
By_raw = df_raw['B_Y'].values * 1e-9
df_filtered = process_timeseries(dt,
Bx_raw,
By_raw,
despike_data=despike_data,
remove_mean=remove_mean)
if he:
Bh_raw = df_raw['B_H'].values * 1e-9
Be_raw = df_raw['B_E'].values * 1e-9
df_he_filtered = process_timeseries(dt,
Bh_raw,
Be_raw,
c1='B_H',
c2='B_E',
despike_data=despike_data,
remove_mean=remove_mean)
df_filtered = df_filtered.join(df_he_filtered)
write_hdf(hdf_fname, df_filtered, key, header)
return hdf_fname
def fill_nans(df, delta=None):
"""
"""
if not delta:
dt_diff = NP.diff(df.index.values)
delta_timedelta64 = min(dt_diff)
delta_seconds = delta_timedelta64 / NP.timedelta64(1, 's')
delta = timedelta(seconds=delta_seconds)
logger.info('Using delta = {} (s)'.format(delta.total_seconds()))
index_new = PD.date_range(start=df.index[0],
end=df.index[-1],
freq=delta)
missing = sorted(set(index_new) - set(df.index))
if missing:
logger.warning('Missing time indices (filled by NaNs):')
for x in missing:
logger.warning(x)
return df.reindex(index_new, copy=False), delta
def nan_interpolate(df):
"""
Reference:
https://stackoverflow.com/questions/29007830/identifying-consecutive-nans-with-pandas
"""
sum_nan = df.isnull().sum()
for col in df.columns:
# longest consecutive run of NaNs in this column
max_run = df[col].isnull().astype(int).groupby(df[col].notnull().astype(int).cumsum()).sum().max()
if sum_nan[col]:
logger.warning('column {} has {} NaNs ({} max consecutive run)'.format(col,
sum_nan[col],
max_run))
df.interpolate(inplace=True)
return df
def process_df(df,
delta=None,
despike_data=True,
subtract_median=True):
"""
"""
if despike_data:
logger.info('despike')
df = despike(df)
logger.info('Fill gaps')
df, delta = fill_nans(df, delta=delta)
logger.info('Gap interpolation')
df = nan_interpolate(df)
# apply 1 - 100 mHz bandpass filter
interval = delta.total_seconds()
if interval == 1.0:
h = second_interval_filter()
elif interval == 60.0:
h = minute_interval_filter()
else:
raise ValueError('1 to 100 mHz filter not yet synthesized for {} s interval data'.format(interval))
data = OrderedDict()
dt = df.index.to_pydatetime()
for i, col in enumerate(df.columns):
logger.info('Band-pass filter {}'.format(col))
if i == 0:
col_filtered, dt_filtered = lp_fir_filter(h, df[col].values, mode='valid', index=dt)
else:
col_filtered = lp_fir_filter(h, df[col].values, mode='valid')
data[col] = col_filtered
df_filtered = PD.DataFrame(index=dt_filtered,
data=data)
# remove median
if subtract_median:
logger.info('Subtract median')
df_filtered = df_filtered.sub(df.median(axis=1), axis=0)
return df_filtered
def process_new(hdf_fname,
source_key='B_raw',
key='B',
despike_data=True,
subtract_median=True):
"""
"""
logger.info('processing {}'.format(hdf_fname))
df_raw, header = read_hdf(hdf_fname, source_key)
df = df_raw[['B_X', 'B_Y']] * 1e-9
df_filtered = process_df(df,
despike_data=despike_data,
subtract_median=subtract_median)
write_hdf(hdf_fname, df_filtered, key, header)
return hdf_fname
def main(argv=None):
if argv is None:
argv = sys.argv
parser = ArgumentParser(description='Apply preprocessing steps to raw magnetometer data.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('hdf_fnames',
type=str,
nargs='*',
metavar='hdf_fname',
help='HDF file record to process')
parser.add_argument('--source-key',
'-s',
type=str,
default='B_raw',
help='key under which the raw input records are stored')
parser.add_argument('--key',
'-k',
type=str,
default='B',
help='key to associate with the processed records')
parser.add_argument('--he',
action='store_true',
help='include results in HE coordinate')
args = parser.parse_args(argv[1:])
for hdf_fname in args.hdf_fnames:
process(hdf_fname,
source_key=args.source_key,
key=args.key,
he=args.he)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
|
mit
|