text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# Package level logger
import random
import string
import itertools
import logging

logger = logging.getLogger("assetid")
# NullHandler: a library should never configure the application's logging.
logger.addHandler(logging.NullHandler())


class IoosUrn(object):
    """ https://geo-ide.noaa.gov/wiki/index.php?title=IOOS_Conventions_for_Observing_Asset_Identifiers """

    def __init__(self, *args, **kwargs):
        # Pieces of an IOOS URN:
        #   urn:ioos:<asset_type>:<authority>:<label>[:<component>][:<discriminant>][#<fragment>]
        self.asset_type = None
        self.authority = None
        self.label = None
        self.component = None
        self.fragment = None
        self.discriminant = None
        # Every keyword argument becomes an attribute.  No validation is done
        # here; see `valid()` for the actual checks.
        for k, v in kwargs.items():
            setattr(self, k, v)

    @staticmethod
    def from_string(urn_string):
        """Parse a URN string into an IoosUrn instance.

        Returns an empty (invalid) IoosUrn when the string has fewer than
        five colon-separated parts.
        """
        # Everything after '#' is the fragment (the key=value attribute section).
        complete = urn_string.split('#')
        fragment = None
        if len(complete) > 1:
            fragment = complete[1]

        parts = complete[0].split(':')
        if len(parts) < 5:
            # Not enough pieces for urn:ioos:<type>:<authority>:<label>
            return IoosUrn()

        urn = IoosUrn()
        urn.asset_type = parts[2]
        urn.authority = parts[3]
        urn.label = parts[4]
        urn.fragment = fragment

        if len(parts) > 5:
            if urn.asset_type == 'station':
                # Stations never carry a 'component' (see valid()), so the
                # sixth piece must be the discriminant.
                urn.discriminant = parts[5]
            elif len(parts) > 6:
                # Also a discriminant specified, so this has to be the component
                urn.component = parts[5]
            else:
                logger.debug("Assuming that {0} is the 'component' piece of the URN (not the 'discriminant')".format(parts[5]))
                urn.component = parts[5]

        if len(parts) > 6:
            urn.discriminant = parts[6]

        if len(parts) > 7:
            # Extra pieces beyond the discriminant are dropped.
            logger.warning("The URN is too long stripping off '{}'".format(':'.join(parts[7:])))

        return urn

    @staticmethod
    def from_dict(authority, label, data_dict):
        """Build a 'sensor' URN from a dict of variable attributes.

        `data_dict` is expected to hold NetCDF-CF style variable attributes;
        the keys read here are 'cell_methods', 'bounds', 'vertical_datum',
        'interval', 'standard_name', 'name' and 'discriminant'.
        """
        def clean_value(v):
            # Strip parens and outer whitespace, and replace inner spaces with
            # underscores so the value is safe inside a URN fragment.
            return v.replace('(', '').replace(')', '').strip().replace(' ', '_')

        fragments = []
        intervals = []  # Because it can be part of cell_methods and its own dict key

        if 'cell_methods' in data_dict and data_dict['cell_methods']:
            cm = data_dict['cell_methods']
            keys = []
            values = []
            sofar = ''
            # Hand-rolled scan of the CF 'cell_methods' string.  A ':' ends a
            # key when one is expected (keys and values balanced); otherwise
            # the accumulated text holds "<previous value> <next key>" and is
            # split backwards at the last space before the ':'.
            for i, c in enumerate(cm):
                if c == ":":
                    if len(keys) == len(values):
                        keys.append(clean_value(sofar))
                    else:
                        for j in reversed(range(0, i)):
                            if cm[j] == " ":
                                key = clean_value(cm[j + 1:i])
                                values.append(clean_value(sofar.replace(key, '')))
                                keys.append(key)
                                break
                    sofar = ''
                else:
                    sofar += c
            # The last value needs appending
            values.append(clean_value(sofar))

            pairs = zip(keys, values)

            mems = []
            cell_intervals = []
            pairs = sorted(pairs)  # groupby requires input sorted on the key
            for group, members in itertools.groupby(pairs, lambda x: x[0]):
                if group == 'interval':
                    cell_intervals = [m[1] for m in members]
                elif group in ['time', 'area']:  # Ignore 'comments'. May need to add more things here...
                    member_strings = []
                    for m in members:
                        member_strings.append('{}:{}'.format(group, m[1]))
                    mems.append(','.join(member_strings))
            if mems:
                fragments.append('cell_methods={}'.format(','.join(mems)))
            if cell_intervals:
                intervals += cell_intervals

        if 'bounds' in data_dict and data_dict['bounds']:
            fragments.append('bounds={0}'.format(data_dict['bounds']))

        if 'vertical_datum' in data_dict and data_dict['vertical_datum']:
            fragments.append('vertical_datum={0}'.format(data_dict['vertical_datum']))

        if 'interval' in data_dict and data_dict['interval']:
            if isinstance(data_dict['interval'], (list, tuple,)):
                intervals += data_dict['interval']
            elif isinstance(data_dict['interval'], str):
                intervals += [data_dict['interval']]

        # Prefer 'standard_name', fall back to 'name', finally invent one so
        # the resulting URN is still valid.
        if 'standard_name' in data_dict and data_dict['standard_name']:
            variable_name = data_dict['standard_name']
        elif 'name' in data_dict and data_dict['name']:
            variable_name = data_dict['name']
        else:
            variable_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(8)).lower()
            logger.warning("Had to randomly generate a variable name: {0}".format(variable_name))

        if intervals:
            intervals = list(set(intervals))  # Unique them
            fragments.append('interval={}'.format(','.join(intervals)))

        urn = IoosUrn(asset_type='sensor',
                      authority=authority,
                      label=label,
                      component=variable_name,
                      fragment=';'.join(fragments) if fragments else None,
                      discriminant=data_dict.get('discriminant'))
        return urn

    @property
    def urn(self):
        # Serialized (lowercased) URN string, or None when the pieces are
        # insufficient (see valid()).
        if self.valid() is False:
            return None
        z = 'urn:ioos:{0}:{1}:{2}'.format(self.asset_type, self.authority, self.label)
        if self.component is not None:
            z += ':{}'.format(self.component)
        if self.discriminant is not None:
            z += ':{}'.format(self.discriminant)
        if self.fragment is not None:
            z += '#{}'.format(self.fragment)
        return z.lower()

    def attributes(self, combine_interval=True):
        """
        By default, this will put the `interval` as part of the `cell_methods`
        attribute (NetCDF CF style). To return `interval` as its own key, use
        the `combine_interval=False` parameter.
        """
        if self.valid() is False:
            return dict()

        if self.asset_type != 'sensor':
            logger.error("This function only works on 'sensor' URNs.")
            return dict()

        d = dict(standard_name=self.component)
        if self.discriminant is not None:
            d['discriminant'] = self.discriminant

        intervals = []
        cell_methods = []
        if self.fragment:
            # The fragment is a ';'-separated list of key=value sections;
            # each value may itself be a ','-separated list.
            for section in self.fragment.split(';'):
                key, values = section.split('=')
                if key == 'interval':
                    # special case, intervals should be appended to the cell_methods
                    for v in values.split(','):
                        intervals.append(v)
                else:
                    if key == 'cell_methods':
                        # Undo the URN-safe encoding done in from_dict().
                        value = [x.replace('_', ' ').replace(':', ': ') for x in values.split(',')]
                        cell_methods = value
                    else:
                        value = ' '.join([x.replace('_', ' ').replace(':', ': ') for x in values.split(',')])
                        d[key] = value

        if combine_interval is True:
            if cell_methods and intervals:
                if len(cell_methods) == len(intervals):
                    # One interval per cell_method: pair them up.
                    d['cell_methods'] = ' '.join(['{} (interval: {})'.format(x[0], x[1].upper()) for x in zip(cell_methods, intervals)])
                else:
                    # Counts don't match: append all intervals at the end.
                    d['cell_methods'] = ' '.join(cell_methods)
                    for i in intervals:
                        d['cell_methods'] += ' (interval: {})'.format(i.upper())
            elif cell_methods:
                d['cell_methods'] = ' '.join(cell_methods)
                for i in intervals:
                    d['cell_methods'] += ' (interval: {})'.format(i.upper())
            elif intervals:
                raise ValueError("An interval without a cell_method is not allowed! Not possible!")
        else:
            # Keep 'interval' as its own attribute instead of folding it in.
            d['cell_methods'] = ' '.join(cell_methods)
            d['interval'] = ','.join(intervals).upper()
        if 'vertical_datum' in d:
            d['vertical_datum'] = d['vertical_datum'].upper()
        return d

    def valid(self):
        """Return True when the URN has the minimum required pieces."""
        ASSET_TYPES = ['station', 'network', 'sensor', 'survey']

        try:
            assert self.authority is not None
        except AssertionError:
            logger.error('An "authority" is required')
            return False

        try:
            assert self.label is not None
        except AssertionError:
            logger.error('A "label" is required')
            return False

        try:
            assert self.asset_type in ASSET_TYPES
        except AssertionError:
            logger.error('asset_type {0} is unknown.  Must be one of: {1}'.format(self.asset_type, ', '.join(ASSET_TYPES)))
            return False

        if self.asset_type == 'station':
            # Per the IOOS convention, stations carry no component piece.
            try:
                assert self.component is None
            except AssertionError:
                logger.error('An asset_type of "station" may not have a "component".')
                return False

        return True

    def __str__(self):
        return self.urn

    def __repr__(self):
        return self.__str__()
|
axiom-data-science/assetid
|
assetid/urn.py
|
Python
|
mit
| 9,163
|
[
"NetCDF"
] |
a53da9202c8f840d3f9eab29d867b56eaadf841602f0b0371e460449a4d75126
|
"""
.. _tut-compute-covariance:
Computing a covariance matrix
=============================
Many methods in MNE, including source estimation and some classification
algorithms, require covariance estimations from the recordings.
In this tutorial we cover the basics of sensor covariance computations and
construct a noise covariance matrix that can be used when computing the
minimum-norm inverse solution. For more information, see
:ref:`minimum_norm_estimates`.
"""
# %%
import os.path as op
import mne
from mne.datasets import sample
# %%
# Source estimation method such as MNE require a noise estimations from the
# recordings. In this tutorial we cover the basics of noise covariance and
# construct a noise covariance matrix that can be used when computing the
# inverse solution. For more information, see :ref:`minimum_norm_estimates`.
data_path = sample.data_path()
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# %%
# The definition of noise depends on the paradigm. In MEG it is quite common
# to use empty room measurements for the estimation of sensor noise. However if
# you are dealing with evoked responses, you might want to also consider
# resting state brain activity as noise.
# First we compute the noise using empty room recording. Note that you can also
# use only a part of the recording with tmin and tmax arguments. That can be
# useful if you use resting state as a noise baseline. Here we use the whole
# empty room recording to compute the noise covariance (``tmax=None`` is the
# same as the end of the recording, see :func:`mne.compute_raw_covariance`).
#
# Keep in mind that you want to match your empty room dataset to your
# actual MEG data, processing-wise. Ensure that filters
# are all the same and if you use ICA, apply it to your empty-room and subject
# data equivalently. In this case we did not filter the data and
# we don't use ICA. However, we do have bad channels and projections in
# the MEG data, and, hence, we want to make sure they get stored in the
# covariance object.
raw_empty_room.info['bads'] = [
bb for bb in raw.info['bads'] if 'EEG' not in bb]
raw_empty_room.add_proj(
[pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']])
noise_cov = mne.compute_raw_covariance(
raw_empty_room, tmin=0, tmax=None)
# %%
# Now that you have the covariance matrix in an MNE-Python object you can
# save it to a file with :func:`mne.write_cov`. Later you can read it back
# using :func:`mne.read_cov`.
#
# You can also use the pre-stimulus baseline to estimate the noise covariance.
# First we have to construct the epochs. When computing the covariance, you
# should use baseline correction when constructing the epochs. Otherwise the
# covariance matrix will be inaccurate. In MNE this is done by default, but
# just to be sure, we define it here manually.
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed
verbose='error') # and ignore the warning about aliasing
# %%
# Note that this method also attenuates any activity in your
# source estimates that resemble the baseline, if you like it or not.
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)
# %%
# Plot the covariance matrices
# ----------------------------
#
# Try setting proj to False to see the effect. Notice that the projectors in
# epochs are already applied, so ``proj`` parameter has no effect.
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info, proj=True)
# %%
# .. _plot_compute_covariance_howto:
#
# How should I regularize the covariance matrix?
# ----------------------------------------------
#
# The estimated covariance can be numerically
# unstable and tends to induce correlations between estimated source amplitudes
# and the number of samples available. The MNE manual therefore suggests to
# regularize the noise covariance matrix (see
# :ref:`cov_regularization_math`), especially if only few samples are
# available. Unfortunately it is not easy to tell the effective number of
# samples, hence, to choose the appropriate regularization.
# In MNE-Python, regularization is done using advanced regularization methods
# described in :footcite:`EngemannGramfort2015`. For this the 'auto' option
# can be used. With this option cross-validation will be used to learn the
# optimal regularization:
noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto',
rank=None)
# %%
# This procedure evaluates the noise covariance quantitatively by how well it
# whitens the data using the
# negative log-likelihood of unseen data. The final result can also be visually
# inspected.
# Under the assumption that the baseline does not contain a systematic signal
# (time-locked to the event of interest), the whitened baseline signal should
# be follow a multivariate Gaussian distribution, i.e.,
# whitened baseline signals should be between -1.96 and 1.96 at a given time
# sample.
# Based on the same reasoning, the expected value for the :term:`global field
# power (GFP) <GFP>` is 1 (calculation of the GFP should take into account the
# true degrees of freedom, e.g. ``ddof=3`` with 2 active SSP vectors):
evoked = epochs.average()
evoked.plot_white(noise_cov_reg, time_unit='s')
# %%
# This plot displays both, the whitened evoked signals for each channels and
# the whitened :term:`GFP`. The numbers in the GFP panel represent the
# estimated rank of the data, which amounts to the effective degrees of freedom
# by which the squared sum across sensors is divided when computing the
# whitened :term:`GFP`. The whitened :term:`GFP` also helps detecting spurious
# late evoked components which can be the consequence of over- or
# under-regularization.
#
# Note that if data have been processed using signal space separation
# (SSS) :footcite:`TauluEtAl2005`,
# gradiometers and magnetometers will be displayed jointly because both are
# reconstructed from the same SSS basis vectors with the same numerical rank.
# This also implies that both sensor types are not any longer statistically
# independent.
# These methods for evaluation can be used to assess model violations.
# Additional
# introductory materials can be found `here <https://goo.gl/ElWrxe>`_.
#
# For expert use cases or debugging the alternative estimators can also be
# compared (see :ref:`ex-evoked-whitening`) and
# :ref:`ex-covariance-whitening-dspm`):
noise_covs = mne.compute_covariance(
epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
rank=None)
evoked.plot_white(noise_covs, time_unit='s')
##############################################################################
# This will plot the whitened evoked for the optimal estimator and display the
# :term:`GFP` for all estimators as separate lines in the related panel.
##############################################################################
# Finally, let's have a look at the difference between empty room and
# event related covariance, hacking the "method" option so that their types
# are shown in the legend of the plot.
evoked_meg = evoked.copy().pick('meg')
noise_cov['method'] = 'empty_room'
noise_cov_baseline['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_baseline, noise_cov], time_unit='s')
##############################################################################
# Based on the negative log-likelihood, the baseline covariance
# seems more appropriate. See :ref:`ex-covariance-whitening-dspm` for more
# information.
# %%
# References
# ----------
#
# .. footbibliography::
|
pravsripad/mne-python
|
tutorials/forward/90_compute_covariance.py
|
Python
|
bsd-3-clause
| 7,983
|
[
"Gaussian"
] |
1459f88b8fe88f6fecae6afbb024e16ec7916831ab8993e408247f4ec66b0915
|
import json
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder
@createClient('DataManagement/FTS3Manager')
class FTS3Client(Client):
    """ Client code to the FTS3 service.

    Thin RPC wrapper around the DataManagement/FTS3Manager service: objects
    are shipped as JSON strings and decoded back with FTS3JSONDecoder.
    All methods return the standard DIRAC S_OK/S_ERROR structures.
    """

    def __init__(self, url=None, **kwargs):
        """ Constructor function.

        :param url: explicit service URL; when None, the standard
                    'DataManagement/FTS3Manager' service lookup is used
        """
        Client.__init__(self, **kwargs)
        self.setServer('DataManagement/FTS3Manager')
        if url:
            self.setServer(url)

    def persistOperation(self, opObj, **kwargs):
        """ Persist (insert/update) an FTS3Operation object into the db

        :param opObj: instance of FTS3Operation
        """
        # In case someone manually set sourceSEs as a list:
        if isinstance(opObj.sourceSEs, list):
            opObj.sourceSEs = ','.join(opObj.sourceSEs)
        opJSON = opObj.toJSON()
        return self._getRPC(**kwargs).persistOperation(opJSON)

    def getOperation(self, operationID, **kwargs):
        """ Get the FTS3Operation from the database

        :param operationID: id of the operation
        :return: S_OK(FTS3Operation object) / S_ERROR
        """
        res = self._getRPC(**kwargs).getOperation(operationID)
        if not res['OK']:
            return res
        opJSON = res['Value']
        try:
            opObj = json.loads(opJSON, cls=FTS3JSONDecoder)
            return S_OK(opObj)
        except Exception as e:
            return S_ERROR("Exception when decoding the FTS3Operation object %s" % e)

    def getActiveJobs(self, limit=20, lastMonitor=None, jobAssignmentTag='Assigned', **kwargs):
        """ Get all the FTSJobs that are not in a final state

        :param limit: max number of jobs to retrieve
        :param lastMonitor: passed through to the service
                            (presumably a cutoff on the last monitoring time
                            -- confirm against the FTS3Manager handler)
        :param jobAssignmentTag: tag used by the service to mark the jobs as assigned
        :return: S_OK(list of FTS3Jobs) / S_ERROR
        """
        res = self._getRPC(**kwargs).getActiveJobs(limit, lastMonitor, jobAssignmentTag)
        if not res['OK']:
            return res
        activeJobsJSON = res['Value']
        try:
            activeJobs = json.loads(activeJobsJSON, cls=FTS3JSONDecoder)
            return S_OK(activeJobs)
        except Exception as e:
            return S_ERROR("Exception when decoding the active jobs json %s" % e)

    def updateFileStatus(self, fileStatusDict, ftsGUID=None, **kwargs):
        """ Update the file ftsStatus and error

        :param fileStatusDict: { fileID : { status , error, ftsGUID } }
        :param ftsGUID: if specified, only update the files having a matching ftsGUID
        """
        return self._getRPC(**kwargs).updateFileStatus(fileStatusDict, ftsGUID)

    def updateJobStatus(self, jobStatusDict, **kwargs):
        """ Update the job Status and error

        :param jobStatusDict: { jobID : { status , error } }
        """
        return self._getRPC(**kwargs).updateJobStatus(jobStatusDict)

    def getNonFinishedOperations(self, limit=20, operationAssignmentTag="Assigned", **kwargs):
        """ Get all the FTS3Operations that have files in New or Failed state

        (reminder: Failed is NOT terminal for files. Failed is when fts failed, but we
        can retry)

        :param limit: max number of operations to retrieve
        :param operationAssignmentTag: tag used by the service to mark the operations as assigned
        :return: S_OK(list of FTS3Operation) / S_ERROR
        """
        res = self._getRPC(**kwargs).getNonFinishedOperations(limit, operationAssignmentTag)
        if not res['OK']:
            return res
        operationsJSON = res['Value']
        try:
            operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
            return S_OK(operations)
        except Exception as e:
            # Consistent with the other methods: message-only S_ERROR
            # (previously a spurious 0 errno positional argument was passed).
            return S_ERROR("Exception when decoding the non finished operations json %s" % e)

    def getOperationsFromRMSOpID(self, rmsOpID, **kwargs):
        """ Get the FTS3Operations matching a given RMS Operation

        :param rmsOpID: id of the operation in the RMS
        :return: S_OK(list of FTS3Operation objects) / S_ERROR
        """
        res = self._getRPC(**kwargs).getOperationsFromRMSOpID(rmsOpID)
        if not res['OK']:
            return res
        operationsJSON = res['Value']
        try:
            operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
            return S_OK(operations)
        except Exception as e:
            # Consistent with the other methods: message-only S_ERROR.
            return S_ERROR("Exception when decoding the operations json %s" % e)
|
fstagni/DIRAC
|
DataManagementSystem/Client/FTS3Client.py
|
Python
|
gpl-3.0
| 3,980
|
[
"DIRAC"
] |
6482bf9c5979a5d5645e67f4ff420cb279f5ddb0e3df3658167c92f1905059bc
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
from scipy._lib.six import string_types
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.

    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `width` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.  Elements where
        `width` is outside [0, 1] are set to ``nan``.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
    """
    t, w = asarray(t), asarray(width)
    # Broadcast t and w against each other so the masks line up element-wise.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # Preserve a floating/complex input dtype; anything else becomes float64.
    # BUG FIX: the original test was `in ['fFdD']` -- membership in a list
    # holding one 4-char string -- which is always False and silently forced
    # float64 output.  Test membership in the string instead.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive; out-of-range entries -> nan
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # On the interval 0 to width*2*pi the function is tmod / (pi*w) - 1.
    # BUG FIX: use `~mask1` instead of `1 - mask1`; subtracting a boolean
    # array raises TypeError on modern NumPy.
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # on the interval width*2*pi to 2*pi the function is
    # (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = ~mask1 & ~mask2
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.  Elements where `duty`
        is outside [0, 1] are set to ``nan``.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = np.sin(2 * np.pi * t)
    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(t, sig)
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(t, pwm)
    >>> plt.ylim(-1.5, 1.5)
    """
    t, w = asarray(t), asarray(duty)
    # Broadcast t and w against each other so the masks line up element-wise.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # Preserve a floating/complex input dtype; anything else becomes float64.
    # BUG FIX: the original test was `in ['fFdD']` -- membership in a list
    # holding one 4-char string -- which is always False and silently forced
    # float64 output.  Test membership in the string instead.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # duty must be between 0 and 1 inclusive; out-of-range entries -> nan
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # On the interval 0 to duty*2*pi the function is 1.
    # BUG FIX: use `~mask1` instead of `1 - mask1`; subtracting a boolean
    # array raises TypeError on modern NumPy.
    tmod = mod(t, 2 * pi)
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # on the interval duty*2*pi to 2*pi the function is -1
    mask3 = ~mask1 & ~mask2
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

        ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    Depending on `retquad` and `retenv`, the real (in-phase) part is
    returned alone, with the imaginary (quadrature) part, and/or with the
    unmodulated Gaussian envelope.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array, or the string 'cutoff' to request the cutoff time
        instead of a waveform.
    fc : int, optional
        Center frequency (e.g. Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level (dB) at which the fractional bandwidth is
        calculated.  Must be negative.  Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', the function returns the cutoff time at which
        the pulse amplitude falls below `tpr` (in dB).  Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal.  Always returned.
    yQ : ndarray
        Imaginary part of signal.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal.  Only returned if `retenv` is True.

    See Also
    --------
    scipy.signal.morlet

    Examples
    --------
    Plot real component, imaginary component, and envelope for a 5 Hz pulse,
    sampled at 100 Hz for 2 seconds:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e, '--')
    """
    # Validate the pulse parameters before doing any math.
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # The envelope exp(-a t^2) has spectrum sqrt(pi/a) exp(-pi^2/a * f^2).
    # Solving g(fc*bw/2) = 10^(bwr/20) for the decay constant gives:
    ref = pow(10.0, bwr / 20.0)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    if isinstance(t, string_types):
        # 'cutoff' mode: solve exp(-a tc**2) = tref for tc,
        # where tref = 10^(tpr/20).
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must be < 0 dB")
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    if not retquad:
        return (yI, yenv) if retenv else yI
    yQ = yenv * sin(2 * pi * fc * t)
    return (yI, yQ, yenv) if retenv else (yI, yQ)
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    In the following, 'Hz' should be interpreted as 'cycles per unit';
    there is no requirement here that the unit be one second.  The
    important distinction is that the units of rotation are cycles, not
    radians.  Likewise, `t` could be a measurement of space instead of
    time.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep.  If not given, `linear` is assumed.  The
        shorter aliases 'lin'/'li', 'quad'/'q', 'log'/'lo' and 'hyp' are
        also accepted.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'.  Determines whether the
        vertex of the parabola that is the graph of the frequency is at
        t=0 (default) or at t=t1.

    Returns
    -------
    y : ndarray
        ``cos(phase + (pi/180)*phi)``, where `phase` is the integral from
        0 to `t` of ``2*pi*f(t)``.  The instantaneous frequency ``f(t)``
        (in Hz) for each method is:

        linear, lin, li:
            ``f(t) = f0 + (f1 - f0) * t / t1``
        quadratic, quad, q:
            A parabola through (0, f0) and (t1, f1), with its vertex at
            (0, f0) when `vertex_zero` is True, else at (t1, f1).
        logarithmic, log, lo:
            ``f(t) = f0 * (f1/f0)**(t/t1)``; f0 and f1 must be nonzero and
            have the same sign.  Also known as a geometric or exponential
            chirp.
        hyperbolic, hyp:
            ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``; f0 and f1 must be
            nonzero.

    See Also
    --------
    sweep_poly

    Examples
    --------
    Plot the waveform for a linear chirp from 6 Hz to 1 Hz over 10 seconds:

    >>> from scipy.signal import chirp
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 10, 5001)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, w)
    >>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
    >>> plt.xlabel('t (sec)')
    >>> plt.show()

    For sweeps over larger frequency ranges, `scipy.signal.spectrogram`
    is a convenient way to visualize the result.
    """
    # The phase integral lives in _chirp_phase so it can be tested
    # independently of the cosine evaluation.
    phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    # phi is given in degrees; convert to radians before applying.
    return cos(phase + phi * (pi / 180))
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    This function generates a sinusoidal function whose instantaneous
    frequency varies with time.  The frequency at time `t` is given by
    the polynomial `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  If `poly` is
        a list or ndarray of length ``n``, the elements of `poly` are the
        coefficients and the instantaneous frequency is
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``.
        If `poly` is an instance of `numpy.poly1d`, then ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees. Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        The signal evaluated at `t` with the requested time-varying
        frequency; precisely ``cos(phase + (pi/180)*phi)`` where `phase`
        is the integral from 0 to `t` of ``2 * pi * f(t)``.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    Compute and plot the waveform with instantaneous frequency
    ``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2`` over ``0 <= t <= 10``:

    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, w)
    >>> plt.show()
    """
    # _sweep_poly_phase does the phase integration; it is a separate helper
    # so the phase can be tested on its own.
    phase = _sweep_poly_phase(t, poly)
    # Convert the phase offset from degrees to radians before summing.
    return cos(phase + phi * pi / 180)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
    """
    Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that represents
        the shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th
        element.  If ``idx='mid'``, the impulse is centered at
        ``shape // 2`` in all dimensions.  If an int, the impulse sits at
        `idx` in all dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`.
        Default is `numpy.float64`.

    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.

    Notes
    -----
    The 1D case is also known as the Kronecker delta.

    .. versionadded:: 0.19.0

    Examples
    --------
    An impulse at the 0th element (:math:`\\delta[n]`):

    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])

    2-dimensional impulse, centered:

    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    """
    out = zeros(shape, dtype)

    # Number of output dimensions, derived from the (possibly scalar) shape.
    ndim = np.atleast_1d(shape).size
    if idx is None:
        target = (0,) * ndim
    elif idx == 'mid':
        # Center of each axis via integer division.
        target = tuple(np.atleast_1d(shape) // 2)
    elif not hasattr(idx, "__iter__"):
        # A scalar index is broadcast to every dimension.
        target = (idx,) * ndim
    else:
        target = idx

    out[target] = 1
    return out
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/signal/waveforms.py
|
Python
|
mit
| 21,039
|
[
"Gaussian"
] |
728777047b024932af77547d85860ee70e58c4c9e4a63a1c799551c8abdaf006
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test macro caller functions """
import os
import sys
sys.path.insert(0, os.path.join(os.pardir, os.pardir))
from unittest import main, TestCase
from module_test import *
from sneakylang.macro_caller import *
from sneakylang.treebuilder import TreeBuilder
from sneakylang.register import Register
#logging.basicConfig(level=logging.DEBUG)
class TestArgumentParsing(TestCase):
    """Tests for parse_macro_arguments positional and keyword parsing."""
    # assertEqual is used throughout: assertEquals is a deprecated alias
    # that was removed in Python 3.12.

    def testEmptyArgument(self):
        # No argument text at all yields None, not an empty list.
        self.assertEqual(None, parse_macro_arguments(u""))

    def testSingleWord(self):
        self.assertEqual([u"test"], parse_macro_arguments(u"test"))

    def testWhitespaceSeparatedWords(self):
        self.assertEqual([u"testing", u"args"], parse_macro_arguments(u"testing args"))

    def testLongArgumentWithinQuotation(self):
        # Quotes group several words into a single argument.
        self.assertEqual([u"testing arg"], parse_macro_arguments(u'"testing arg"'))

    def testLongArgumentWithinQuotationWithSeparateWord(self):
        self.assertEqual([u"testing arg", u"argument"], parse_macro_arguments(u'"testing arg" argument'))

    def testCombinationOfQuotedAndSeparatedWords(self):
        self.assertEqual([u"arg", u"harg", u"testing arg", u"argument"], parse_macro_arguments(u'arg "harg" "testing arg" argument'))

    def testKeywordArgument(self):
        # With return_kwargs=True the result is an (args, kwargs) pair.
        self.assertEqual(([], {'argument': u'testing arg'}), parse_macro_arguments(u'argument="testing arg"', return_kwargs=True))

    def testKeywordMustBeNamed(self):
        # '=' without a keyword name degrades to plain positional chunks.
        self.assertEqual(([u"blah", u'="testing', u'arg"'], {}), parse_macro_arguments(u'blah ="testing arg"', return_kwargs=True))
class TestHelperFunctions(TestCase):
    """Tests for the macro_caller helper utilities."""
    # assertEqual is used throughout: assertEquals is a deprecated alias
    # that was removed in Python 3.12.

    def test_strip_long_argument_chunk(self):
        # A complete quoted chunk is stripped off and returned second.
        self.assertEqual((u" aaa", u'"testing chunk"'), strip_long_argument_chunk(u'"testing chunk" aaa', u''))
        # An unterminated quote leaves the input unchanged.
        self.assertEqual((u'"testing chunkaaa', ''), strip_long_argument_chunk(u'"testing chunkaaa', u''))

    def test_move_chars(self):
        self.assertEqual(('ba', 'a'), move_chars("a", "aba", ""))
        # Moving chars not present in the source must fail loudly.
        self.assertRaises(ValueError, lambda: move_chars("a", "zzz", ""))

    def test_nested_macro_chunk(self):
        self.assertEqual("((yess))", get_nested_macro_chunk("((yess))"))
class TestMacroCaller(TestCase):
    """Tests for macro name resolution and macro expansion."""
    # assertEqual is used throughout: assertEquals is a deprecated alias
    # that was removed in Python 3.12.

    def setUp(self):
        self.reg = Register([DummyMacro])

    def testContentResolving(self):
        self.assertEqual('arg arg', get_content('arg arg))adf'))
        self.assertEqual('dummy_macro', get_content('dummy_macro))'))
        # Without a closing ')) ' there is no content.
        self.assertEqual(None, get_content('arg arg'))
        # Macros must be closed on the same line.
        self.assertEqual(None, get_content('arg arg \n))'))

    def testResolveName(self):
        self.assertEqual(('dummy_macro', None), resolve_macro_name('dummy_macro'))
        self.assertEqual(('dummy_macro', 'arg arg'), resolve_macro_name('dummy_macro arg arg'))

    def testResolvingFromRegister(self):
        self.assertEqual('dummy_macro', resolve_name_from_register('dummy_macro', self.reg))

    def testResolvingNameFromMacro(self):
        self.assertEqual('dummy_macro', get_macro_name('((dummy_macro))', self.reg))
        self.assertEqual('dummy_macro', get_macro_name('((dummy_macro argument argument))', self.reg))
        # Unclosed or multiline macro markup resolves to no name.
        self.assertEqual(None, get_macro_name('((dummy_macro argument argument haha', self.reg))
        self.assertEqual(None, get_macro_name('((dummy_macro argument argument \n)) Multiline not allowed', self.reg))

    def testMacroExpanding(self):
        builder = TreeBuilder(root=DummyNode())
        call_macro(DummyMacro, '', Register([DummyMacro]), builder, None)
        self.assertEqual(DummyNode, builder.root.children[0].__class__)
        res = expand_macro_from_stream('((dummy_macro))', self.reg, TreeBuilder, None)
        self.assertEqual(res[0].__class__, DummyMacro)
        self.assertEqual(res[1], '')
# Run the unittest runner when this module is executed as a script.
if __name__ == "__main__":
    main()
|
andrejtokarcik/python-sneakylang
|
sneakylang/test/test_macro_caller.py
|
Python
|
bsd-3-clause
| 3,825
|
[
"ADF"
] |
966cd6a2f1a36471fc6b634b7b91d796676d266edcf381d43d7ec338e800ad4f
|
#!/usr/bin/env python
"""
This file is a series of tasks to preprocess COBRE dataset
Installation
------------
It runs on Python > 3.3 or Python2.7 and uses invoke (or Fabric when a Python3 version is released) to execute
the tasks from the command line.
- requirements
pip install invoke
pip install git@github.com:Neurita/boyle.git
- optional requirement (for caching results):
pip install joblib
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import re
import shutil
import logging
import os.path as op
import numpy as np
from functools import partial
from subprocess import Popen, PIPE
from collections import OrderedDict
from boyle.mhd.write import copy_mhd_and_raw
from boyle.commands import which
from boyle.utils.strings import count_hits, where_is
from boyle.utils.text_files import read
from boyle.utils.rcfile import (rcfile, get_sections, get_sys_path, find_in_sections,
get_rcfile_section, get_rcfile_variable_value)
from boyle.files.search import recursive_find_match, check_file_exists
from boyle.files.names import get_extension, remove_ext
from boyle.nifti.cpac_helpers import xfm_atlas_to_functional
from boyle.nifti.roi import partition_timeseries
from boyle.storage import save_variables_to_hdf5
from invoke import task
from invoke import run as local
# setup log
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# read configurations
APPNAME = 'cobre'
try:
    # All directories below come from the user's rcfile(s) for the 'cobre'
    # application; a missing key raises KeyError, caught and logged below.
    CFG = rcfile(APPNAME)
    RAW_DIR = op.expanduser(CFG['raw_dir' ])
    PREPROC_DIR = op.expanduser(CFG['preproc_dir'])
    CACHE_DIR = op.expanduser(CFG['cache_dir' ])
    EXPORTS_DIR = op.expanduser(CFG['exports_dir'])
    ATLAS_DIR = op.expanduser(CFG['atlas_dir' ])
    STD_DIR = op.expanduser(CFG['std_dir' ])
    # Tasks operate on the preprocessed data by default.
    DATA_DIR = PREPROC_DIR
    # read files_of_interest section
    FOI_CFG = rcfile(APPNAME, 'files_of_interest')
except:
    # Bare except, but the error is logged and re-raised so nothing is hidden.
    log.exception('Error reading config variable from settings in {} rcfiles.'.format(APPNAME))
    raise
def verbose_switch(verbose=False):
    """Set the root logger level to DEBUG when *verbose* is True, else INFO."""
    level = logging.DEBUG if verbose else logging.INFO
    logging.getLogger().setLevel(level)
@task
def clean_cache(cache_dir=CACHE_DIR):
    """Remove joblib cache folder"""
    # Expand '~' here as well, in case the caller passed a raw path.
    target = op.expanduser(cache_dir)
    log.info('Removing cache folder {}'.format(target))
    shutil.rmtree(target)
@task(autoprint=True)
def get_rc_sections(app_name=APPNAME):
    """Return the available rcfiles sections"""
    # Thin wrapper so the section list is reachable from the command line;
    # autoprint makes invoke print the returned list.
    sections = get_sections(app_name)
    return sections
@task
def show_configuration(app_name=APPNAME, section=None):
    """ Show the rcfile configuration variables for the given application.

    Parameters
    ----------
    app_name: str
        Name of the application to look for rcfiles.
    section: str
        Rcfile section name. If given, only that section is printed.
    """
    def print_options(cfg):
        # Print 'key : value' for every option in the section dict.
        for opt in cfg:
            print("{} : {}".format(opt, cfg[opt]))

    print_options(rcfile(app_name, section))
    if section is not None:
        return

    for sec in get_sections(app_name):
        # Skip the app's own section: its options were already printed above.
        if app_name not in sec:
            print('')
            print('[{}]'.format(sec))
            print_options(rcfile(app_name, sec))
@task(autoprint=True)
def get_subject_labels(app_name=APPNAME, subj_labels_file_varname='subj_labels'):
    """ Return the class labels of all subjects in a list

    Parameters
    ----------
    app_name: str
        Name of the application to look for rcfiles.
    subj_labels_file_varname: str
        Name of the rcfile variable that holds the path to the subject labels file.

    Returns
    -------
    labels
        list of int

    Raises
    ------
    KeyError
        If the variable is not present in the rcfile.
    """
    # BUG FIX: look the variable up *before* expanding the path.  The original
    # called op.expanduser(None) first, which raises TypeError, so the
    # explicit KeyError below could never trigger.
    file_var = CFG.get(subj_labels_file_varname, None)
    if file_var is None:
        raise KeyError('Could not find variable {} in {} rcfile.'.format(subj_labels_file_varname, app_name))

    file_path = op.realpath(op.expanduser(file_var))
    # One integer label per line in the file.
    return np.loadtxt(file_path, dtype=int, delimiter='\n')
def read_subject_ids_file(app_name=APPNAME, subj_id_list_varname='subj_id_list_file'):
    """ Return the content of the subject_id file in a list.

    Parameters
    ----------
    app_name: str
        Name of the application to look for rcfiles.
    subj_id_list_varname: str
        Name of the rcfile variable that holds the path to the subject ids file.

    Returns
    -------
    subject_ids: list of str

    Raises
    ------
    KeyError
        If the variable is not present in the rcfile.
    """
    # BUG FIX: look the variable up *before* expanding the path.  The original
    # called op.expanduser(None) first, which raises TypeError, so the
    # explicit KeyError below could never trigger.
    file_var = CFG.get(subj_id_list_varname, None)
    if file_var is None:
        raise KeyError('Could not find variable {} in {} rcfile.'.format(subj_id_list_varname, app_name))

    file_path = op.realpath(op.expanduser(file_var))
    log.debug('Reading list of subject ids from file {}.'.format(file_path))
    # One subject id per line.
    return read(file_path).split('\n')
@task(autoprint=True)
def get_subject_ids(app_name=APPNAME, subj_id_list_varname='subj_id_list_file', remove_commented=False):
    """ Return the ids of all subjects in a list.

    Parameters
    ----------
    app_name: str
        Name of the application to look for rcfiles.
    subj_id_list_varname: str
        Name of the rcfile variable that holds the path to the subject ids file.
    remove_commented: bool
        If True, IDs commented out with a '#' are dropped; otherwise all IDs
        are returned with the comment marker stripped.

    Returns
    -------
    subject_ids: list of str
    """
    raw_ids = read_subject_ids_file(app_name, subj_id_list_varname)
    if remove_commented:
        return [sid for sid in raw_ids if not sid.startswith('#')]
    return [sid.replace('#', '').strip() for sid in raw_ids]
@task(autoprint=True)
def get_filtered_subjects_ids_and_labels(app_name=APPNAME, subj_id_list_varname='subj_id_list_file',
                                         subj_id_regex_varname='subj_id_regex'):
    """Filter the ids in the subj_id_list_file with the subj_id_regex variable
    and return the surviving ids together with their labels.

    The recommendation is to add a '#' character in front of the IDs that you
    want excluded from the experiment, so they no longer match the regex.

    Parameters
    ----------
    app_name: str
        Name of the application to look for rcfiles.
    subj_id_list_varname: str
        Name of the rcfile variable that holds the path to the subject ids file.
    subj_id_regex_varname: str
        Name of the rcfile variable holding the subject-id regular expression.

    Returns
    -------
    filt_ids: list of str
        The subject ids that match the subject_id regex variable from the rcfile.
    filt_labs: list
        The labels corresponding to filt_ids.
    """
    subj_ids = read_subject_ids_file(app_name, subj_id_list_varname)
    labels = get_subject_labels()

    id_pattern = re.compile(CFG[subj_id_regex_varname])

    log.debug('Filtering list of files using subjects ids from subject ids file.')
    filt_ids = []
    filt_labs = []
    for idx, sid in enumerate(subj_ids):
        # Keep only ids that match from the start of the string.
        if id_pattern.match(sid) is not None:
            filt_ids.append(sid)
            filt_labs.append(labels[idx])

    return filt_ids, filt_labs
@task(autoprint=True)
def get_subject_ids_and_labels(filter_by_subject_ids=False):
    """Return (subject_ids, labels), optionally restricted to the ids that
    pass the subj_id_regex filter."""
    if not filter_by_subject_ids:
        return get_subject_ids(), get_subject_labels()
    return get_filtered_subjects_ids_and_labels()
def filter_list_by_subject_ids(files, subject_ids):
    """ Keep only the file paths that contain any of the given subject ids.

    Parameters
    ----------
    files: list of str
        List of file paths that contain a subject id.
    subject_ids: list of str
        List of subject ids that you want included in files.

    Returns
    -------
    filtered_list: list of str
        File paths that matched at least one id in subject_ids.
    """
    # Nothing to filter, or nothing to filter by: return the input unchanged.
    if not files:
        return files
    if not subject_ids:
        return files

    log.debug('Filtering list of files using subjects ids.')
    return [fn for fn in files
            if any(re.search(sid, fn) for sid in subject_ids)]
def call_and_logit(cmd, logfile='logfile.log'):
    """Call cmd line with shell=True and save its stdout and stderr in logfile.

    Parameters
    ----------
    cmd: str
        Command line, executed through the shell.
    logfile: str
        Path of the log file to append to. If falsy, nothing is written.

    Returns
    -------
    rc: int
        The command's return code.
    """
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    rc = p.returncode

    if not logfile:
        return rc

    try:
        with open(logfile, 'a') as alog:
            alog.write(output.decode("utf-8"))
            alog.write( err.decode("utf-8"))
    except Exception:
        # Narrowed from a bare 'except'; and the 'return' was moved out of a
        # 'finally' block, where it silently swallowed any raised exception
        # (including KeyboardInterrupt).
        log.exception('Error writing logfile {}.'.format(logfile))
    return rc
@task
def compress_niftis(work_dir=DATA_DIR, verbose=False):
    """Compress nifti files within work_dir using fslchfiletype command."""
    # fslchfiletype is an FSL tool; bail out gracefully when it is missing.
    if not which('fslchfiletype'):
        log.error('Cannot find fslchfiletype to compress NifTi files. Passing.')
        return -1

    verbose_switch(verbose)

    for nii in sorted(recursive_find_match(work_dir, '.*nii$')):
        log.debug('Compressing {}'.format(nii))
        local('fslchfiletype NIFTI_GZ {}'.format(nii))
@task
def rename_files_of_interest(work_dir=DATA_DIR, verbose=False):
    """Look through the work_dir looking to the patterns matches indicated in the files_of_interest section of the
    config file.
    For each match it creates a copy of the file in the same folder renamed to the names of the section configuration
    option.
    This will keep the file extensions, adding a counter value to the name if there is more than one match.
    """
    verbose_switch(verbose)

    def copy_file(src, dst):
        """Copy `src` into its own folder under the base name of `dst`,
        keeping `src`'s extension. Return the path of the created copy."""
        dirname = op.dirname(src)
        ext = get_extension(src)
        dst = op.join(dirname, remove_ext(op.basename(dst)))

        # add a counter value to the files that have repeated names
        if op.exists(dst + ext):
            fc = 2
            while op.exists(dst + str(fc) + ext):
                fc += 1
            dst = dst + str(fc) + ext
        else:
            # BUG FIX: the extension was previously appended only inside the
            # collision branch, so first copies were written extension-less.
            dst = dst + ext

        # copy the src file to the given dst value
        try:
            if ext == '.mhd':
                # copy_mhd_and_raw carries the companion .raw file over too.
                # NOTE(review): assumes it accepts the destination .mhd path —
                # confirm against boyle.mhd.write.copy_mhd_and_raw.
                return copy_mhd_and_raw(src, dst)
            shutil.copyfile(src, dst)
            return dst
        except Exception:
            log.exception('Error copying file {} to {}.'.format(src, dst))
            raise

    def has_mhd_with_raws(files):
        """Return True if the number of .raw files is the same as the number of .mhd files"""
        return count_hits(files, r'.*\.raw$') == count_hits(files, r'.*\.mhd$')

    for foi in FOI_CFG:
        regex = FOI_CFG[foi]
        files = recursive_find_match(work_dir, regex)
        files.sort()

        if not files:
            log.error('Could not find {} files that match {} within {}.'.format(foi, regex, work_dir))
            continue

        use_copy_mhd_and_raw = has_mhd_with_raws(files)

        # BUG FIX: this was 'log.debig', which raised AttributeError at runtime.
        log.debug('Copying {} to {}.'.format(regex, foi))

        for fn in files:
            ext = get_extension(fn)
            if use_copy_mhd_and_raw and ext == '.raw':
                # .raw files are copied along with their .mhd header.
                continue

            new_fn = op.join(op.dirname(fn), foi) + ext
            try:
                new_dst = copy_file(fn, new_fn)
            except Exception:
                msg = 'Error copying file {} to {}.'.format(fn, new_fn)
                log.exception(msg)
                raise IOError(msg)

            if not op.exists(new_dst):
                msg = 'Error copying file {} to {}. After trying to copy, the file does not exist.'.format(fn, new_dst)
                log.error(msg)
@task
def remove_files_of_interest(work_dir=DATA_DIR, verbose=True):
    """Look through the work_dir looking to the patterns matches indicated in the files_of_interest section of
    the config file and remove them.
    """
    verbose_switch(verbose)

    for foi in FOI_CFG:
        regex = get_file_of_interest_regex(foi)
        # BUG FIX: the message previously formatted len(work_dir) — the number
        # of characters in the path — instead of the directory itself.
        log.info('Removing within {} that match {}.'.format(work_dir, regex))
        remove_files(regex, work_dir, verbose)
@task
def remove_files(pattern, work_dir=DATA_DIR, verbose=False):
    """Look through the work_dir looking to the patterns matches the pattern argument value and remove them.
    """
    verbose_switch(verbose)

    import sys
    # BUG FIX: strtobool lives in distutils.util; the previous fallback
    # 'from distutils import strtobool' could never succeed.
    # NOTE(review): distutils was removed in Python 3.12 — confirm target
    # interpreter or vendor the one-line strtobool.
    from distutils.util import strtobool

    try:
        # Python 2: prompt with raw_input (plain 'input' evals its input there).
        input_func = raw_input
    except NameError:
        # Python 3: raw_input was renamed to input.
        input_func = input

    def prompt(query):
        """Ask a y/n question on stdout; re-prompt until the answer parses."""
        sys.stdout.write('%s [y/n]: ' % query)
        val = input_func()
        try:
            ret = strtobool(val)
        except ValueError:
            sys.stdout.write('Please answer with a y/n\n')
            return prompt(query)
        return ret

    files = find_files(work_dir, pattern)
    if not files:
        log.info('Could not find files that match r"{0}" within {1}.'.format(pattern, work_dir))
        return

    log.info('\n'.join(files))
    if prompt('Found these files. Want to remove?'):
        for fn in files:
            log.debug('Removing {}.'.format(fn))
            os.remove(fn)
@task(autoprint=True)
def find_files(work_dir, regex):
    """ Return a sorted list of the files that match the regex value within work_dir.

    Parameters
    ----------
    work_dir: str
        Path of the root folder from where to start the search.
    regex: str
        Regular expression to match against file paths.
    """
    # A missing/invalid work_dir simply yields an empty result.
    try:
        check_file_exists(work_dir)
    except:
        return []

    return sorted(recursive_find_match(work_dir, regex))
@task(autoprint=True)
def get_file_of_interest_regex(name, app_name=APPNAME):
    """Return the regex of the name variable in the files_of_interest section of the app rc file."""
    # Delegates to the generic rcfile lookup with the section name fixed.
    return get_rcfile_variable_value(name, 'files_of_interest', app_name)
def print_list(alist):
    """Print each item of *alist* on its own line."""
    # A plain loop instead of the previous '[print(i) for i in alist]':
    # a comprehension used only for side effects builds and discards
    # a throwaway list of None values.
    for item in alist:
        print(item)
@task
def show_regex_match(regex, work_dir=DATA_DIR, filter_by_subject_ids=False):
    """Lists the files inside work_dir that match the name of the given regex.

    Parameters
    ----------
    regex: str
        Regular expession
    work_dir: str
        Path of the root folder from where to start the search.
        Or, if the given name is not an existing path, name of the rcfile
        variable that contains the folder path.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in
        the rcfile and filter the resulting list and let only matches to the
        subject ID list values.
    """
    try:
        check_file_exists(work_dir)
    except:
        # Not an existing path: treat work_dir as an rcfile variable name.
        work_dir = op.expanduser(CFG[work_dir])

    files = find_files(work_dir, regex)

    subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
    if filter_by_subject_ids:
        files = filter_list_by_subject_ids(files, subj_ids)

    if files:
        log.info('# Files that match "{}" in {}:'.format(regex, work_dir))
        print_list(files)
    else:
        log.info('No files that match "{}" found in {}.'.format(regex, work_dir))
@task
def show_files(name, work_dir=DATA_DIR, filter_by_subject_ids=False):
    """Show a list of the files inside work_dir that match the regex value of the variable 'name' within the
    files_of_interest section.

    Parameters
    ----------
    name: str
        Name of the variable in files_of_interest section.
    work_dir: str
        Path of the root folder from where to start the search.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.

    Returns
    -------
    files: list of str
        The matching (possibly filtered) file paths.
    """
    # The original wrapped this call in a try/except that only re-raised,
    # which added nothing; let exceptions propagate naturally.
    regex = get_file_of_interest_regex(name)

    log.debug('Looking for files that match {} within {}.'.format(regex, work_dir))
    files = find_files(work_dir, regex)

    subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
    if filter_by_subject_ids:
        files = filter_list_by_subject_ids(files, subj_ids)

    if not files:
        log.error('No files that match "{}" found in {}.'.format(regex, work_dir))
    else:
        log.debug('# Files that match "{}" in {}:'.format(regex, work_dir))
        print_list(files)

    return files
@task
def show_my_files(rcpath, app_name=APPNAME, filter_by_subject_ids=False):
    """Shows the files within the rcpath, i.e., a string with one '/', in the
    format <variable of folder path>/<variable of files_of_interest regex>.

    Parameters
    ----------
    rcpath: str
        A path with one '/', in the format
        <variable of folder path>/<variable of files_of_interest regex>.
        For example: 'data_dir/anat' looks up the folder path in the data_dir
        variable and the regex in the anat variable of files_of_interest.
    app_name: str
        Name of the app to look for the correspondent rcfile.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in
        the rcfile and filter the resulting list and let only matches to the
        subject ID list values.
    """
    # Guard: the argument must contain the '/' separator.
    if '/' not in rcpath:
        log.error("Expected an rcpath in the format <variable of folder path>/<variable of files_of_interest regex>.")
        return -1

    folder_var, regex_var = rcpath.split('/')

    cfg = rcfile(app_name)
    if folder_var not in cfg:
        log.error("Option {} not found in {} section.".format(folder_var, app_name))
        return -1

    search_dir = op.expanduser(cfg[folder_var])
    return show_files(regex_var, search_dir, filter_by_subject_ids=filter_by_subject_ids)
@task
def clean():
    """Remove a few temporal files and logs in this folder."""
    local('rm *.log')
    local('rm *.pyc')
    # ignore_errors: don't crash when __pycache__ does not exist.
    shutil.rmtree('__pycache__', ignore_errors=True)
@task(autoprint=True)
def get_standard_file(file_name_varname, app_name=APPNAME):
    """ Return the path to an atlas or a standard template file.
    Looking for 'standard' and 'atlas' section in rcfiles.

    Parameters
    ----------
    file_name_varname: str
    app_name: str

    Returns
    -------
    std_path: str
        Path to the atlas or the standard template.
    """
    section_name, var_value = find_in_sections(file_name_varname, app_name)

    # Map the rcfile section holding the variable to its base directory.
    base_dirs = {'atlases': ATLAS_DIR, 'standard': STD_DIR}
    if section_name not in base_dirs:
        raise KeyError('The variable {} could only be found in section {}. '
                       'I do not know what to do with this.'.format(file_name_varname, section_name))

    return op.join(base_dirs[section_name], var_value)
#
# @task
# def create_cpac_subj_list(anat_file_var='raw_anat', rest_files_vars=['raw_rest'],
# output='CPAC_subject_list_file.yaml',
# filter_by_subject_ids=False, verbose=False):
# """Create a C-PAC subject list file including the path to the files represented by the variables in
# conf_variables.
#
# Parameters
# ----------
# anat_file_var: str
# Variable name in the application rcfiles which hold the name of the subject anatomical file.
#
# rest_files_vars: list of str
# List of variable names in the application rcfiles which hold the name of the subject fMRI files.
#
# output: str
# Path of the output file
#
# filter_by_subject_ids: bool
# If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
# and let only matches to the subject ID list values.
#
# verbose: bool
# If True will show debug logs.
# """
# import yaml
#
#
@task
def run_cpac(cpac_pipeline_file_varname='cpac_pipeline_file', verbose=False):
    """Execute cpac_run.py using the configuration from the rcfile"""
    # Resolve the C-PAC configuration paths from the rcfile; a missing
    # variable is logged before being re-raised.
    try:
        conf_dir = op.realpath(op.join(op.dirname(__file__), CFG['cpac_conf']))
        subjects_list = op.realpath(op.join(conf_dir, CFG['cpac_subjects_list']))
        pipeline_file = op.realpath(op.join(conf_dir, CFG[cpac_pipeline_file_varname]))
    except KeyError as ke:
        log.exception(ke)
        raise

    verbose_switch(verbose)

    cpac_cmd = 'cpac_run.py'
    cpac_path = which(cpac_cmd)
    if cpac_path is None:
        log.error('Could not find {} command.'.format(cpac_cmd))
        return -1

    # Start from a fresh log file on every run.
    if op.exists('cpac.log'):
        log.debug('Remove cpac.log file.')
        os.remove('cpac.log')

    shell_cmd = '"{}" "{}" "{}"'.format(cpac_path, pipeline_file, subjects_list)
    log.debug('Calling: {}'.format(shell_cmd))
    log.info ('Logging to cpac.log')

    call_and_logit(shell_cmd, 'cpac.log')
# ----------------------------------------------------------------------------------------------------------------------
# COBRE PROJECT SPECIFIC FUNCTIONS
# ----------------------------------------------------------------------------------------------------------------------
# Folder of the previous ("old") COBRE preprocessing output; '' when unset.
OLD_COBRE_DIR = op.expanduser(CFG.get('old_cobre_dir', ''))
# rcfile section with settings specific to the old COBRE pipeline.
OLD_COBRE_CFG = rcfile(APPNAME, 'old_cobre')
# Regex used to extract a subject ID from a file path.
SUBJ_ID_REGEX = CFG['subj_id_regex']
# FreeSurfer subjects directory.
FSURF_DIR = op.expanduser(CFG['fsurf_dir'])
# NOTE(review): this rebinds the module-level PREPROC_DIR (set earlier from
# 'preproc_dir') to the old COBRE folder — confirm the shadowing is intended.
PREPROC_DIR = OLD_COBRE_DIR
@task
def recon_all(input_dir=RAW_DIR, out_dir=FSURF_DIR, use_cluster=True, verbose=False, filter_by_subject_ids=False):
    """Execute recon_all on all subjects from input_dir/raw_anat

    Parameters
    ----------
    input_dir: str
        Path to where the subjects are
    out_dir: str
        Path to output folder where freesurfer will leave results.
    use_cluster: bool
        If True will use fsl_sub to submit the jobs to your set up cluster
        queue. This is True by default. Use the flag -c to set it to False.
    verbose: bool
        If True will show debug logs.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in
        the rcfile and filter the resulting list and let only matches to the
        subject ID list values.
    """
    verbose_switch(verbose)

    # FreeSurfer reads its output location from this environment variable.
    os.environ['SUBJECTS_DIR'] = out_dir

    anat_regex = get_file_of_interest_regex('raw_anat')
    subj_anats = find_files(input_dir, anat_regex)
    id_pattern = re.compile(SUBJ_ID_REGEX)

    subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
    if filter_by_subject_ids:
        subj_anats = filter_list_by_subject_ids(subj_anats, subj_ids)

    # Renamed from 'recon_all' to avoid shadowing this function's own name.
    recon_all_bin = which('recon-all')
    for anat_path in subj_anats:
        subj_id = id_pattern.search(anat_path).group()
        # e.g.: recon-all -all -i raw/0040000/session_1/anat_1/mprage.nii.gz -s 0040000
        cmd = '{} -all -i {} -s {} -sd {}'.format(recon_all_bin, anat_path, subj_id, out_dir)
        if use_cluster:
            cmd = 'fsl_sub ' + cmd

        log.debug('Calling {}'.format(cmd))
        call_and_logit(cmd, 'freesurfer_{}.log'.format(subj_id))
def show_pipeline_files(root_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
                        file_name_varname='reho', filter_by_subject_ids=False, app_name=APPNAME):
    """ Print the file_name_varname files found in the corresponding pipeline.

    NOTE(review): this definition is shadowed by the @task-decorated
    `show_pipeline_files` defined later in this module, so this version is
    effectively dead code.  It also passes `section_name=` to
    `get_pipeline_files`, whose signature names that parameter
    `pipe_section_name` — if ever executed, this would raise a TypeError.
    Confirm which definition is meant to survive.

    Parameters
    ----------
    root_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    file_name_varname: str
        RCfile variable name for the file you are looking for.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    app_name: str
        Name of the application to look for rcfiles.
    """
    print_list(get_pipeline_files(root_dir=root_dir, section_name=section_name, pipe_varname=pipe_varname,
                                  file_name_varname=file_name_varname, filter_by_subject_ids=filter_by_subject_ids,
                                  app_name=app_name))
@task(autoprint=True)
def get_pipeline_folder(root_dir=PREPROC_DIR, pipe_section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
                        app_name=APPNAME):
    """Return the pipeline folder path: <resolved root_dir>/<pipeline sub-path>.

    Parameters
    ----------
    root_dir: str
        A real path, or an rcfile variable name resolved through get_sys_path.
    pipe_section_name: str
        Rcfile section holding the pipeline variables.
    pipe_varname: str
        Rcfile variable name whose value is the pipeline sub-path pattern.
    app_name: str
        Application name used to locate the rcfiles.
    """
    # Sub-path of the pipeline inside the root folder, from the rcfile.
    pipe_dirpath = get_rcfile_variable_value(pipe_varname, section_name=pipe_section_name, app_name=app_name)
    # root_dir may itself be an rcfile variable name; get_sys_path resolves it.
    root_dir = get_sys_path (root_dir, section_name=pipe_section_name, app_name=app_name)
    return op.join(root_dir, pipe_dirpath)
@task(autoprint=True)
def get_pipeline_files(root_dir=PREPROC_DIR, pipe_section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
                       file_name_varname='reho', filter_by_subject_ids=False, app_name=APPNAME):
    """Return a list of the file_name_varname files in the corresponding pipeline.

    See show_pipeline_files for the parameter descriptions.
    """
    pipe_dir = get_pipeline_folder(root_dir=root_dir, pipe_varname=pipe_varname,
                                   pipe_section_name=pipe_section_name, app_name=app_name)

    # The rcfile section where file_name_varname is declared decides how the
    # files are located below.
    section_name, var_value = find_in_sections(file_name_varname, app_name)
    if section_name == 'files_of_interest':
        # var_value is a filename regex searched recursively under pipe_dir.
        varname = var_value
        log.debug('Looking for {} files from pipe {} within {} folder'.format(varname, pipe_varname, pipe_dir))
        files = find_files(pipe_dir, varname)
    elif section_name == 'relative_paths':
        # var_value is a path relative to each subject folder inside pipe_dir.
        # NOTE(review): 'varname' (from the 'funcfiltx' variable) only feeds
        # the debug message here; the file list is built from relpath alone —
        # confirm this is intended.
        varname = get_rcfile_variable_value('funcfiltx', section_name='files_of_interest', app_name=app_name)
        relpath = var_value
        log.debug('Looking for {} files from pipe {} within {} folder'.format(varname, pipe_varname, pipe_dir))
        files = [op.join(pipe_dir, subj_f, relpath) for subj_f in os.listdir(pipe_dir)]
    else:
        raise KeyError('The variable {} could only be found in section {}. '
                       'I do not know what to do with this.'.format(file_name_varname, section_name))

    if filter_by_subject_ids:
        subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
        files = filter_list_by_subject_ids(files, subj_ids)

    log.debug('Found {} files that match the file name in pipeline folder {}.'.format(len(files), pipe_dir))
    return files
@task
def show_pipeline_files(root_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
                        file_name_varname='reho', verbose=False, filter_by_subject_ids=False):
    """Print the file_name_varname files found in the corresponding pipeline.

    Parameters
    ----------
    root_dir: str
        A real file path or a RCfile variable name which indicates where to
        start looking for files. Note: be sure that if you want it a variable
        name, don't have a folder with the same name near this script.
    section_name: str
        RCfile section name to get the pipe_varname and also look for
        root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the
        full paths of the found files.
    file_name_varname: str
        RCfile variable name for the file you are looking for.
    verbose: bool
        If verbose will show DEBUG log messages.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in
        the rcfile and filter the resulting list and let only matches to the
        subject ID list values.
    """
    verbose_switch(verbose)

    found = get_pipeline_files(root_dir, section_name, pipe_varname, file_name_varname,
                               filter_by_subject_ids=filter_by_subject_ids)
    if found:
        print_list(found)
    else:
        log.info('Could not find {} files from pipe {} within {} folder'.format(file_name_varname, pipe_varname, root_dir))
@task
def pack_pipeline_files(root_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
                        file_name_varname='reho', mask_file_varname='brain_mask_dil_3mm', smooth_fwhm=0,
                        output_file='cobre_reho_pack.mat', verbose=False, filter_by_subject_ids=False):
    """Mask and compress the pipeline data into a file.

    Will save into the file: data, mask_indices, vol_shape
    data: Numpy array with shape N x prod(vol.shape)
          containing the N files as flat vectors.
    mask_indices: matrix with indices of the voxels in the mask
    vol_shape: Tuple with shape of the volumes, for reshaping.

    Parameters
    ----------
    root_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    file_name_varname: str
        RCfile variable name for the file you are looking for.
    mask_file_varname: str
        RCfile variable name for the mask file that you want to use to mask the data.
    smooth_fwhm: int
        FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
    output_file: str
        Path to the output file. The extension of the file will be taken into account for the file format.
        Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
                               '.mat' (Matlab archive),
                               '.hdf5' or '.h5' (HDF5 file)
    verbose: bool
        If verbose will show DEBUG log info.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.

    Raises
    ------
    SystemExit
        If no files are found for the given pipeline/file variable.
    """
    verbose_switch(verbose)

    mask_file = None
    if mask_file_varname:
        mask_file = op.join(op.expanduser(CFG['std_dir']), FOI_CFG[mask_file_varname])

    subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)

    pipe_files = get_pipeline_files(root_dir, section_name, pipe_varname, file_name_varname,
                                    filter_by_subject_ids=filter_by_subject_ids)
    # Deterministic subject order in the packed matrix.
    pipe_files.sort()
    if not pipe_files:
        log.info('Could not find {} files from pipe {} '
                 'within {} folder'.format(file_name_varname, pipe_varname, root_dir))
        # `exit()` is an interactive/site helper; raise SystemExit explicitly instead.
        raise SystemExit(-1)

    log.debug('Parsing {} subjects into a Nifti file set.'.format(len(pipe_files)))
    try:
        _pack_files_to(pipe_files, mask_file=mask_file, labels=labels, subj_ids=subj_ids, smooth_fwhm=smooth_fwhm,
                       output_file=output_file, verbose=verbose)
    except Exception:
        # Log with traceback, then let the caller decide what to do.
        log.exception('Error creating the set of subjects from {} files '
                      'from pipe {} within {} folder'.format(file_name_varname, pipe_varname, root_dir))
        raise
def _pack_files_to(images, output_file, mask_file=None, labels=None, subj_ids=None, smooth_fwhm=0, verbose=False):
    """Mask NeuroImage files, put all the data in a matrix and save them into
    output_file together with mask shape and affine information and labels.

    Will save into the file: data, mask_indices, vol_shape, labels
    data: Numpy array with shape N x prod(vol.shape)
          containing the N files as flat vectors.
    mask_indices: matrix with indices of the voxels in the mask
    vol_shape: Tuple with shape of the volumes, for reshaping.

    Parameters
    ----------
    images: list of str or img-like object.
        See boyle.nifti.NeuroImage constructor docstring.
    mask_file: str or img-like object.
        See boyle.nifti.NeuroImage constructor docstring.
    labels: list or tuple of str or int or float.
        This list should have the same length as images.
        If None, will use the info in the rcfile config files.
    subj_ids: list or tuple of str
        This list should have the same length as images.
        If None, will use the info in the rcfile config files.
    smooth_fwhm: int
        FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
    output_file: str
        Path to the output file. The extension of the file will be taken into account for the file format.
        Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
                               '.mat' (Matlab archive),
                               '.hdf5' or '.h5' (HDF5 file)
    verbose: bool
        If verbose will show DEBUG log info.
    """
    from boyle.nifti.sets import NeuroImageSet

    verbose_switch(verbose)

    # The previous try/except here only re-raised; any exception from
    # NeuroImageSet construction simply propagates to the caller now.
    subj_set = NeuroImageSet(images, mask=mask_file, labels=labels, all_compatible=True)
    subj_set.others['subj_ids'] = np.array(subj_ids)

    log.debug('Saving masked data into file {}.'.format(output_file))
    subj_set.to_file(output_file, smooth_fwhm=smooth_fwhm)
@task
def pack_files(name, output_file, work_dir=DATA_DIR, mask_file=None, labels=None, subj_ids=None, smooth_fwhm=0,
               verbose=False, filter_by_subject_ids=False):
    """Pack a list of the files inside work_dir that match the regex value of the variable 'name' within the
    files_of_interest section.

    Parameters
    ----------
    name: str
        Name of the variable in files_of_interest section.
    work_dir: str
        Path of the root folder from where to start the search.
    mask_file: str
        RCfile variable name for the mask file that you want to use to mask the data.
    labels: list or tuple of str or int or float.
        This list should have the same length as images.
        If None, will use the info in the rcfile config files.
    subj_ids: list or tuple of str
        This list should have the same length as images.
        If None, will use the info in the rcfile config files.
    smooth_fwhm: int
        FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
    output_file: str
        Path to the output file. The extension of the file will be taken into account for the file format.
        Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
                               '.mat' (Matlab archive),
                               '.hdf5' or '.h5' (HDF5 file)
    verbose: bool
        If verbose will show DEBUG log info.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    """
    verbose_switch(verbose)

    if mask_file is not None:
        mask_file = op.join(op.expanduser(CFG['std_dir']), CFG[mask_file])
        check_file_exists(mask_file)

    images = show_files(name, work_dir=work_dir, filter_by_subject_ids=filter_by_subject_ids)

    # Only fall back to the rcfile values when the caller did not provide them,
    # as documented (previously the given arguments were always overwritten).
    if subj_ids is None or labels is None:
        rc_ids, rc_labels = get_subject_ids_and_labels(filter_by_subject_ids)
        if subj_ids is None:
            subj_ids = rc_ids
        if labels is None:
            labels = rc_labels

    if images:
        _pack_files_to(images, output_file, mask_file=mask_file, labels=labels, subj_ids=subj_ids,
                       smooth_fwhm=smooth_fwhm, verbose=verbose)
@task
def pack_my_files(rcpath, output_file, app_name=APPNAME, mask_file=None, labels=None, smooth_fwhm=0,
                  verbose=False, filter_by_subject_ids=False):
    """Pack a list of the files within the rcpath, i.e., a string with one '/', in the
    format <variable of folder path>/<variable of files_of_interest regex>.

    Parameters
    ----------
    rcpath: str
        A path with one '/', in the format <variable of folder path>/<variable of files_of_interest regex>.
        For example: 'data_dir/anat' will look for the folder path in the data_dir variable and the regex in the
        anat variable inside the files_of_interest section.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
    mask_file: str
        RCfile variable name for the mask file that you want to use to mask the data.
    labels: list or tuple of str or int or float.
        This list should have the same length as images.
        If None, will use the info in the rcfile config files.
    smooth_fwhm: int
        FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
    output_file: str
        Path to the output file. The extension of the file will be taken into account for the file format.
        Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
                               '.mat' (Matlab archive),
                               '.hdf5' or '.h5' (HDF5 file)
    verbose: bool
        If verbose will show DEBUG log info.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    """
    verbose_switch(verbose)

    if mask_file is not None:
        mask_file = get_standard_file(mask_file)
        check_file_exists(mask_file)

    images = show_my_files(rcpath, app_name=app_name, filter_by_subject_ids=filter_by_subject_ids)

    # Subject ids always come from the rcfile; labels only when not given by
    # the caller, as documented (previously the argument was always overwritten).
    subj_ids, rc_labels = get_subject_ids_and_labels(filter_by_subject_ids)
    if labels is None:
        labels = rc_labels

    if images:
        _pack_files_to(images, output_file, mask_file=mask_file, labels=labels, subj_ids=subj_ids,
                       smooth_fwhm=smooth_fwhm, verbose=verbose)
def get_cobre_export_data(root_dir=EXPORTS_DIR, section_name='old_cobre', type='timeseries', regex='',
                          app_name=APPNAME):
    """Return the list of exported archive files of the given type matching `regex`.

    Parameters
    ----------
    root_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    type: str
        Type of the data within the exported file archive. Choices:
        'timeseries' - for smoothed or not raw fMRI timeseries data
        'scalar_activity' - for local activity measures from fMRI timeseries data, e.g., reho, alff, etc.
    regex: str
        Regular expression to match with the archive file name.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)

    Returns
    -------
    list
        List of export files found

    Raises
    ------
    ValueError
        If `type` is not one of the documented choices.
    KeyError
        If the rcfile is missing any of the expected directory variables.
    """
    type_choices = {'timeseries', 'scalar_activity'}
    try:
        settings = get_rcfile_section(app_name, section_name)
        feats_dir_name = settings['features_dir']
        ts_feats_dir_name = settings['timeseries_feats_dir']
        scalar_feats_dir_name = settings['scalar_wtemp_noglob_feats_dir']
    except IOError:
        # Let rcfile-access errors propagate untouched.
        raise
    except Exception:
        msg = 'Error looking for variable names in {} rc file in section {}.'.format(app_name, section_name)
        log.exception(msg)
        raise KeyError(msg)

    if type == 'timeseries':
        work_dir = op.join(root_dir, feats_dir_name, ts_feats_dir_name)
    elif type == 'scalar_activity':
        work_dir = op.join(root_dir, feats_dir_name, scalar_feats_dir_name)
    else:
        msg = 'Expected type variable value of {} but got {}.'.format(type_choices, type)
        log.error(msg)
        raise ValueError(msg)

    files = find_files(work_dir, regex)
    if files:
        log.debug('Found the following export data files: {}.'.format(files))
    else:
        log.debug('Did not find any export data files within {} with the regex {}.'.format(work_dir, regex))

    return files
def get_cobre_export_timeseries(root_dir=EXPORTS_DIR, section_name='old_cobre', fwhm='4mm'):
    """Return the exported timeseries archives whose file name contains `fwhm`.

    See get_cobre_export_data.

    Parameters
    ----------
    fwhm: str
        Part of the file name with information of FWHM smoothing kernel size, e.g.: '0mm' or '4mm'

    Returns
    -------
    List of files found
    """
    fwhm_regex = '.*{0}.*'.format(fwhm)
    return get_cobre_export_data(root_dir, section_name=section_name, type='timeseries', regex=fwhm_regex)
def get_cobre_export_scalar_data(root_dir=EXPORTS_DIR, section_name='old_cobre', type='reho', pipeline='wtemp_noglob'):
    """Return the exported scalar-activity archives matching `type` and `pipeline`.

    See get_cobre_export_data.

    Parameters
    ----------
    type: str
        Type of scalar fMRI-based activity measure, e.g., 'reho', 'alff', 'falff', 'vmhc'
    pipeline: str
        Pipeline configuration name used in the archive file name.

    Returns
    -------
    List of files found
    """
    name_regex = '.*{0}.*{1}.*'.format(type, pipeline)
    return get_cobre_export_data(root_dir, section_name=section_name, type='scalar_activity', regex=name_regex)
def has_the_correct_subject_order(alist, filter_by_subject_ids=False):
    """Check that `alist` matches the subject id list in length and order.

    Uses the subject id list from get_subject_ids_and_labels and compares item
    by item: each element of `alist` must match the subject id in the same position.

    Parameters
    ----------
    alist: list of str or list of list of str
        If list of string will re.search each string item using the corresponding subject id.
        If list of lists of string will look within each sub-list for an exact match of the corresponding subject id.

    Returns
    -------
    has_the_correct_order: bool
        Will return False with any error, length mismatch or element without subject id match.
        True otherwise.
    """
    subj_ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)

    # Guard clauses: empty inputs or a length mismatch are reported and rejected.
    if len(subj_ids) < 1:
        log.error('The list of subjects ids is empty. Expected something else.')
        return False

    if len(alist) < 1:
        log.error('The given list to be checked is empty. Expected something else.')
        return False

    if len(subj_ids) != len(alist):
        log.error('The length of the given list and the list of subject ids are different. Expected the same length, '
                  'got {} and {}. The fist element of the given list is {}.'.format(len(alist), len(subj_ids), alist[0]))
        return False

    # Pairwise match: every element must correspond to the subject id in the same slot.
    for sid, item in zip(subj_ids, alist):
        if isinstance(item, str):
            if re.search(sid, item) is None:
                return False
        elif isinstance(item, list):
            if where_is(item, sid, lookup_func=re.search) < 0:
                return False
        else:
            log.error('The given list element type is {}. Expected str or list of str.'
                      'The first element of the given list is {}'.format(type(item), alist[0]))
            return False

    return True
@task(autoprint=True)
def get_subject_folders(work_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_noglob',
                        app_name=APPNAME, verbose=False, filter_by_subject_ids=False, check_order=True):
    """Return the list of subject folders inside the pipeline folder, ordered
    to match the subject id list from the rcfile.

    Parameters
    ----------
    work_dir: str (optional)
        Root folder path (or RCfile variable name) where the pipeline folder lives.
    section_name: str (optional)
        Name of the section in the rcfiles to look for the pipe_varname argument value.
    pipe_varname: str (optional)
        Name of the variable in the rcfiles which hold the path to the desired pipeline to look for the subject folders.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
    verbose: bool (optional)
        If verbose will show DEBUG log info.
    filter_by_subject_ids: bool (optional)
        If True will filter the subject id list by the subj_id_list_file rcfile variable first.
    check_order: bool (optional)
        If True will verify that the resulting folders match the subject id list
        in length and order, raising IOError otherwise.

    Returns
    -------
    subj_folders: list of str
        Paths to the subject folders, one per matched subject id, in subject-id order.

    Raises
    ------
    IOError
        If check_order is True and the folders found do not match the subject id list.
    """
    verbose_switch(verbose)

    # get the pipeline folder path
    pipe_dirpath = get_pipeline_folder(root_dir=work_dir, pipe_section_name=section_name, pipe_varname=pipe_varname,
                                       app_name=app_name)

    # every entry directly under the pipeline folder is a candidate subject folder
    folders = [op.join(pipe_dirpath, subj_f) for subj_f in os.listdir(pipe_dirpath)]

    subj_ids, _ = get_subject_ids_and_labels(filter_by_subject_ids)

    # pick, for each subject id in order, the first folder whose path matches it;
    # ids without a matching folder are silently skipped (check_order catches that)
    subj_folders = []
    for idx, sid in enumerate(subj_ids):
        fidx = where_is(folders, sid, lookup_func=re.search)
        if fidx >= 0:
            subj_folders.append(folders[fidx])

    # check that the folders and ids have the same length and match
    if check_order:
        if not has_the_correct_subject_order(subj_folders, filter_by_subject_ids=filter_by_subject_ids):
            raise IOError('The list of subject folders found in {} and the list of subject '
                          'ids do not match.'.format(pipe_dirpath))

    return subj_folders
@task(autoprint=True)
def get_subject_folder(subj_id, work_dir=PREPROC_DIR, section_name='old_cobre',
                       pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False):
    """Return the folder of subject `subj_id` within the pipeline folder.

    Parameters
    ----------
    subj_id: str
        ID number of the subject.
    work_dir: str (optional)
        Root folder path
    section_name: str (optional)
        Name of the section in the rcfiles to look for the pipe_varname argument value.
    pipe_varname: str (optional)
        Name of the variable in the rcfiles which hold the path to the desired pipeline to look for the subject folder.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
    verbose: bool (optional)
        If verbose will show DEBUG log info.

    Returns
    -------
    subj_dir: str
        Path to the subject folder.
    """
    verbose_switch(verbose)

    # Fetch every subject folder of the pipeline (unfiltered), then pick the one for subj_id.
    all_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
                                      app_name=app_name, verbose=verbose, filter_by_subject_ids=False)

    return get_subject_folder_from_list(subj_id, file_list=all_folders, verbose=verbose)
@task(autoprint=True)
def get_subject_file(file_varname, subj_dir, check_exists=True, app_name=APPNAME):
    """ Return the filepath for the rcfile file_varname for the given subject folder.

    Parameters
    ----------
    file_varname: str
        RCfile variable name: either a regex in 'files_of_interest' or a
        relative path in 'relative_paths'.
    subj_dir: str
        Path to the subject folder in which to resolve the file.
    check_exists: bool
        If True, raise IOError when the resolved path does not exist.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)

    Returns
    -------
    filepath: str

    Raises
    ------
    IOError
        If zero or more than one file matches the regex, or if check_exists is
        True and the resolved path does not exist.
    KeyError
        If file_varname was found in an unsupported rcfile section.
    """
    section_name, var_value = find_in_sections(file_varname, app_name)
    if section_name == 'files_of_interest':
        filepath = find_files(subj_dir, var_value)
        if isinstance(filepath, list):
            # Exactly one match is expected; report zero and multiple matches distinctly
            # (previously an empty result raised the misleading 'more than one' error).
            if not filepath:
                raise IOError('Could not find any file matching {} within {}.'.format(var_value, subj_dir))
            if len(filepath) > 1:
                raise IOError('Found more than one file {} within {}.'.format(var_value, subj_dir))
            filepath = filepath[0]
    elif section_name == 'relative_paths':
        filepath = op.join(subj_dir, var_value)
    else:
        raise KeyError('The variable {} could only be found in section {}. '
                       'I do not know what to do with this.'.format(file_varname, section_name))

    if check_exists:
        if not op.exists(filepath):
            raise IOError('File {} not found.'.format(filepath))

    return filepath
@task(autoprint=True)
def get_subject_folder_from_list(subj_id, file_list=None, verbose=False):
    """Return the subject root folder for `subj_id` extracted from a file list.

    Parameters
    ----------
    subj_id: str
        ID number of the subject.
    file_list: list of str (optional)
        List of file paths which will be looked through to find the subject folder.
    verbose: bool (optional)
        If verbose will show DEBUG log info.

    Returns
    -------
    subj_dir: str
        Path to the subject folder.

    Raises
    ------
    RuntimeError
        If the file list does not match the subject id list in length and order.
    """
    verbose_switch(verbose)

    # the file list must pair up one-to-one, in order, with the subject id list
    if not has_the_correct_subject_order([f.split(op.sep) for f in file_list]):
        raise RuntimeError('The list of functional files found and the list of subject ids do not match.')

    subj_ids, _ = get_subject_ids_and_labels()
    pos = where_is(subj_ids, subj_id)
    subj_filepath = file_list[pos]

    # keep the path components up to (and including) the one that names the subject
    path_parts = subj_filepath.split(op.sep)
    id_pos = where_is(path_parts, subj_id, lookup_func=re.search)
    return os.sep.join(path_parts[0:id_pos + 1])
@task
def slicesdir(underlying, outline=None, work_dir=PREPROC_DIR, section_name='old_cobre',
              pipe_varname='pipe_wtemp_noglob', verbose=False, filter_by_subject_ids=False, axials=False):
    """ Call slicesdir using the relative file paths in the files_of_interest section.

    Parameters
    ----------
    underlying: str
        A files_of_interest relative file path variable, that will be used to look for the volume files that will
        be used as background in the slices images.
    outline: str
        If is a path to a file, this will be used as red-outline image on top of all images in underlying.
        If a files_of_interest relative file path variable, will match this list with the underlying subjects list
        and use each of them as red-outline image on top of the corresponding underlying image.
    work_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    verbose: bool
        If verbose will show DEBUG log messages.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    axials: bool
        If True will output every second axial slice rather than just 9 ortho slices.
    """
    verbose_switch(verbose)

    # path of FSL's slicesdir executable
    slicesdir = op.join(os.environ['FSLDIR'], 'bin', 'slicesdir')

    # get the list of functional files for the given pipeline
    # NOTE: the keyword is `pipe_section_name` (passing `section_name=` raised TypeError).
    funcs = get_pipeline_files(root_dir=work_dir, pipe_section_name=section_name,
                               pipe_varname=pipe_varname, filter_by_subject_ids=filter_by_subject_ids,
                               file_name_varname='funcfiltx')

    # check that func and ids have the same length and match
    if not has_the_correct_subject_order([f.split(op.sep) for f in funcs]):
        msg = 'The list of functional files found and the list of subject ids do not match.'
        raise RuntimeError(msg)

    ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)

    outline_filepath = ''
    outline_is_one = False
    if outline is not None:
        if op.exists(outline):
            # `outline` is an actual file: use the same outline for every subject
            outline_filepath = outline
            outline_is_one = True

    # get relative filepaths
    underlying_filepath = get_file_of_interest_regex(underlying)
    log.debug('Using as background image: {}'.format(underlying_filepath))

    if outline is not None and not outline_filepath:
        # `outline` is a files_of_interest variable: resolve one outline per subject
        outline_filepath = get_file_of_interest_regex(outline)
        log.debug('Using as red outline image: {}'.format(outline_filepath))

    underlyings = []
    outlines = []
    for idx, subj_id in enumerate(ids):
        subj_dir = get_subject_folder_from_list(subj_id, file_list=funcs, verbose=verbose)

        subj_underlying = op.join(subj_dir, underlying_filepath)
        if not op.exists(subj_underlying):
            raise IOError('Could not find file {}.'.format(subj_underlying))
        underlyings.append(subj_underlying)

        if not outline_is_one and outline_filepath:
            subj_outline = op.join(subj_dir, outline_filepath)
            if not op.exists(subj_outline):
                raise IOError('Could not find file {}.'.format(subj_outline))
            outlines.append(subj_outline)

    # assemble the slicesdir command-line arguments
    args = ' '
    if axials:
        args += '-S '

    if outlines:
        # per-subject outlines: interleave <underlying> <outline> pairs
        args += '-o '
        args += ' '.join(['{} {}'.format(i, j) for i, j in zip(underlyings, outlines)])
    elif outline_is_one:
        # single outline image applied on top of every underlying image
        args += '-p {} '.format(outline_filepath)
        args += ' '.join(underlyings)
    else:
        args += ' '.join(underlyings)

    cmd = slicesdir + args
    log.debug('Running: {}'.format(cmd))
    local(cmd)
@task(autoprint=True)
def register_atlas_to_functionals(work_dir=PREPROC_DIR, atlas='aal_3mm', anat_out_var='aal_3mm_anat',
                                  func_out_var='aal_3mm_func', section_name='old_cobre',
                                  pipe_varname='pipe_wtemp_noglob', verbose=False, filter_by_subject_ids=False,
                                  parallel=False, app_name=APPNAME):
    """Apply the existent transformation from MNI standard to functional MRI to an atlas image in MNI space.

    For every subject folder of the pipeline, looks up the anatomical brain, the mean
    functional image and the already-computed registration matrices/warps, then calls
    xfm_atlas_to_functional to produce the atlas in anatomical and functional space.

    Parameters
    ----------
    work_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    atlas: str
        Files of interest variable name or file path to a 3D atlas volume.
    anat_out_var: str
        Variable name that holds the file name of the resulting registered atlas in a specific subject anatomical
        space.
    func_out_var: str
        Variable name that holds the file name of the resulting registered atlas in a specific subject functional
        space.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    verbose: bool
        If verbose will show DEBUG log messages.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    parallel: bool
        If True will launch the commands using ${FSLDIR}/fsl_sub to use the cluster infrastructure you have setup
        with FSL (SGE or HTCondor).
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)

    Raises
    ------
    IOError
        If the atlas file cannot be found.
    """
    verbose_switch(verbose)

    # Resolve `atlas` first as a standard-file variable name; if that lookup fails
    # for any reason, fall back to treating it as a literal file path.
    try:
        atlas_filepath = get_standard_file(atlas)
    except:
        atlas_filepath = atlas

    if not op.exists(atlas_filepath):
        raise IOError('Could not find atlas file {}.'.format(atlas_filepath))

    #read relative filepaths
    # check_order=True guarantees folders are aligned with the subject id list
    subj_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
                                       app_name=app_name, verbose=verbose, filter_by_subject_ids=filter_by_subject_ids,
                                       check_order=True)

    for subj_path in subj_folders:
        # inputs must exist (check_exists=True); only the two outputs may be absent
        find_subject_file_and_check = partial(get_subject_file, subj_dir=subj_path, check_exists=True)

        anat_brain      = find_subject_file_and_check('anat_brain' )
        avg_func        = find_subject_file_and_check('mean_func' )
        atlas2anat_lin  = find_subject_file_and_check('anat_to_mni_mat' )
        atlas2anat_nlin = find_subject_file_and_check('anat_to_mni_nl' )
        anat2func_lin   = find_subject_file_and_check('anat_to_func_mat')

        atlas_in_anat   = get_subject_file(anat_out_var, subj_dir=subj_path, check_exists=False)
        atlas_in_func   = get_subject_file(func_out_var, subj_dir=subj_path, check_exists=False)

        log.debug('Registering atlas to functional: {}.\n'.format(' ,'.join([anat_brain, avg_func, atlas2anat_lin,
                                                                             atlas2anat_nlin, anat2func_lin,
                                                                             atlas_in_anat,
                                                                             atlas_in_func])))

        # interp='nn' keeps atlas labels integral; rewrite=False skips existing outputs
        xfm_atlas_to_functional(atlas_filepath, anat_brain, avg_func, atlas2anat_lin, atlas2anat_nlin, False,
                                anat2func_lin, atlas_in_anat, atlas_in_func, interp='nn', verbose=verbose,
                                rewrite=False, parallel=parallel)
@task(autoprint=True)
def get_atlaspartition_hdf5path(subj_id, pipe_varname='pipe_wtemp_noglob', atlas='aal_3mm_func'):
    """ Return the hdf5 path for the atlas partition for the subject timeseries in the pipeline.

    Parameters
    ----------
    subj_id: str
        Subject ID
    pipe_varname: str
        Pipeline variable name.
    atlas: str
        Atlas variable name

    Returns
    -------
    hdf5path: str
        Internal HDF5 group path of the form '/<pipe>_<atlas>_timeseries/<subj_id>'.
    """
    group_name = '{0}_{1}_timeseries'.format(pipe_varname, atlas)
    return '/{0}/{1}'.format(group_name, subj_id)
@task(autoprint=True)
def get_atlaspartition_hdf5_filepath(atlas='aal_3mm_func', app_name=APPNAME):
    """ Return the path of the HDF5 file which contains the atlas partition timeseries.

    Parameters
    ----------
    atlas: str
        Atlas variable name. Currently only 'aal_3mm_func' is supported.
    app_name: str
        Name of the app to look for the correspondent rcfile.

    Returns
    -------
    hdf5_filepath: str

    Raises
    ------
    ValueError
        If `atlas` is not a supported atlas variable name.
    """
    # BUGFIX: the check used to compare against 'atlas_3mm_func', which no caller
    # (nor the default value) ever passes, so the function always raised.
    if atlas == 'aal_3mm_func':
        return op.join(EXPORTS_DIR, get_rcfile_variable_value('out_aal_timeseries', app_name=app_name))
    else:
        raise ValueError('Expected the name of a valid atlas variable name as `aal_3mm_func`, '
                         'but got {}.'.format(atlas))
@task(autoprint=True)
def get_connectivity_hdf5_filepath(atlas='aal_3mm_func', app_name=APPNAME):
    """ Return the path of the HDF5 file which contains the connectivity matrices.

    Parameters
    ----------
    atlas: str
        Atlas variable name. Currently only 'aal_3mm_func' is supported.
    app_name: str
        Name of the app to look for the correspondent rcfile.

    Returns
    -------
    hdf5_filepath: str

    Raises
    ------
    ValueError
        If `atlas` is not a supported atlas variable name.
    """
    # BUGFIX: the check used to compare against 'atlas_3mm_func', which no caller
    # (nor the default value) ever passes, so the function always raised.
    if atlas == 'aal_3mm_func':
        return op.join(EXPORTS_DIR, get_rcfile_variable_value('out_aal_connectivities', app_name=app_name))
    else:
        raise ValueError('Expected the name of a valid atlas variable name as `aal_3mm_func`, '
                         'but got {}.'.format(atlas))
@task
def save_atlas_timeseries_packs(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
                                pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
                                filter_by_subject_ids=False):
    """ Save the atlas partitioned timeseries into an HDF5 file.

    Parameters
    ----------
    work_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    atlas: str
        Files of interest variable name or file path to a 3D atlas volume.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    verbose: bool
        If verbose will show DEBUG log messages.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
    """
    verbose_switch(verbose)

    all_timeseries = atlas_partition_timeseries(work_dir=work_dir, atlas=atlas, section_name=section_name,
                                                pipe_varname=pipe_varname, app_name=app_name, verbose=verbose,
                                                filter_by_subject_ids=filter_by_subject_ids)

    hdf_filepath = get_atlaspartition_hdf5_filepath(atlas, app_name=app_name)

    # one HDF5 group per subject, appended into the same file
    for subj_id, subj_ts in all_timeseries.items():
        h5path = get_atlaspartition_hdf5path(subj_id, pipe_varname=pipe_varname, atlas=atlas)
        log.debug('Saving {} {} partitioned functional timeseries in '
                  '{} group {}.'.format(subj_id, atlas, hdf_filepath, h5path))
        save_variables_to_hdf5(hdf_filepath, {'{}_timeseries'.format(atlas): subj_ts}, mode='a',
                               h5path=h5path)
def atlas_partition_timeseries(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
                               pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
                               filter_by_subject_ids=False):
    """ Return a dictionary with each subject's timeseries partitioned by the atlas file.

    Parameters
    ----------
    work_dir: str
        A real file path or a RCfile variable name which indicates where to start looking for files.
        Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
    atlas: str
        Files of interest variable name or file path to a 3D atlas volume.
    section_name: str
        RCfile section name to get the pipe_varname and also look for root_dir if needed.
    pipe_varname: str
        RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
    verbose: bool
        If verbose will show DEBUG log messages.
    filter_by_subject_ids: bool
        If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
        and let only matches to the subject ID list values.
    app_name: str
        Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)

    Returns
    -------
    subj_timeseries: dict
        OrderedDict mapping subject id -> partitioned timeseries dict.
    """
    verbose_switch(verbose)

    # subject folders come back checked and aligned with the subject id list
    subj_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
                                       app_name=app_name, verbose=verbose,
                                       filter_by_subject_ids=filter_by_subject_ids, check_order=True)

    ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)

    subj_timeseries = OrderedDict()
    for subj_id, subj_path in zip(ids, subj_folders):
        get_checked_file = partial(get_subject_file, subj_dir=subj_path, check_exists=True)

        funcbrainmask = get_checked_file('funcbrainmask')
        functional = get_checked_file('func_freq_filtered')
        atlas_in_func = get_checked_file(atlas)

        log.debug('Partitioning subject {} timeseries in {} using atlas {}.'.format(subj_id, functional, atlas_in_func))
        subj_timeseries[subj_id] = partition_timeseries(functional, atlas_in_func, funcbrainmask, zeroe=True,
                                                        roi_values=None, outdict=True)

    return subj_timeseries
#@task
#def save_connectivity_matrices(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
# pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
# filter_by_subject_ids=False):
""" Save the connectivity matrices of with each subject's timeseries partitioned by the atlas file into an HDF5
file.
The file will be saved in exports
Parameters
----------
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of intereste variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
"""
# connectitity_filepath = get_connectivity_hdf5_filepath(atlas, app_name=APPNAME)
#def create_connectivity_matrices(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
# pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
# filter_by_subject_ids=False):
""" Return a dictionary with each subject's timeseries partitioned by the atlas file.
Parameters
----------
# work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of intereste variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_timeseries: dict
"""
# h5path = get_atlaspartition_hdf5path(subj_id, pipe_varname=pipe_varname, atlas=atlas)
# timeseries_filepath = get_atlaspartition_hdf5_filepath(atlas, app_name=app_name)
# connectitity_filepath = get_connectivity_hdf5_filepath(atlas, app_name=APPNAME)
#load_variables_from_hdf5
#ts = h5py.File('/Users/alexandre/Dropbox (Neurita)/projects/cobre/cobre_partitioned_timeseries.hdf5')
|
alexsavio/cobre
|
fabfile.py
|
Python
|
bsd-3-clause
| 68,111
|
[
"Gaussian"
] |
7a1649b37d01e99c61e30449aa57780dc217c296f0de17a5909d1bc2267de042
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
# Packaging script for PBxplore: reads the long description from README.rst
# and declares the distribution metadata.
project_root = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(project_root, 'README.rst')) as readme_file:
    long_description_text = readme_file.read()

# Optional dependency groups, installable as e.g. `pip install pbxplore[analysis]`.
optional_deps = {
    'analysis': ['weblogo>=3.7'],
    'all': ['weblogo>=3.7']
}

# Version number must be in sync with the one in pbxplore/__init__.py
setup(
    name='pbxplore',
    version='1.4.0',
    description="PBxplore is a suite of tools dedicated to Protein Block analysis.",
    long_description=long_description_text,
    url='https://github.com/pierrepo/PBxplore',
    # Author details
    author='Pierre Poulain',
    author_email='pierre.poulain@cupnet.net',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Physics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    setup_requires=['pytest-runner'],
    python_requires='>=3.6',
    install_requires=['numpy', 'MDAnalysis>=0.11', 'matplotlib'],
    tests_require=['pytest', 'pytest-raises', 'coverage'],
    # List additional groups of dependencies here
    # To install, use
    # $ pip install -e .[analysis]
    extras_require=optional_deps,
    packages=find_packages(exclude=['test']),
    include_package_data=True,
    package_data={'pbxplore': ['demo/*']},
    entry_points={
        'console_scripts': [
            'PBassign = pbxplore.scripts.PBassign:pbassign_cli',
            'PBcount = pbxplore.scripts.PBcount:pbcount_cli',
            'PBstat = pbxplore.scripts.PBstat:pbstat_cli',
        ],
    },
)
|
HubLot/PBxplore
|
setup.py
|
Python
|
mit
| 2,043
|
[
"MDAnalysis"
] |
d74232c9dab0d7726774d0d9c19c4c20f1fd9fee1609a425b1fc9b7977dfecae
|
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Seed both the stdlib `random` and `np.random` generators.

    x: int seed
    """
    for seed_func in (random.seed, np.random.seed):
        seed_func(x)
def Odds(p):
    """Converts a probability to odds in favor.

    Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.

    The odds formula divides by zero at p=1; rather than raise,
    this function defines Odds(1) to be infinity.

    p: float 0-1

    Returns: float odds
    """
    return float('inf') if p == 1 else p / (1 - p)
def Probability(o):
    """Converts odds in favor to the corresponding probability.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability

    o: float odds, strictly positive

    Returns: float probability
    """
    total = o + 1
    return o / total
def Probability2(yes, no):
    """Converts for/against counts to the corresponding probability.

    Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.

    yes, no: int or float odds in favor
    """
    total = yes + no
    return yes / total
class Interpolator(object):
    """Maps between two sorted sequences via linear interpolation.

    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Looks up x and returns the corresponding value of y."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Looks up y and returns the corresponding value of x."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, x, xs, ys):
        """Linearly interpolates ys at position x in xs, clamped at the ends."""
        # outside the domain, clamp to the boundary values
        if x <= xs[0]:
            return ys[0]
        if x >= xs[-1]:
            return ys[-1]
        hi = bisect.bisect(xs, x)
        lo = hi - 1
        t = 1.0 * (x - xs[lo]) / (xs[hi] - xs[lo])
        return ys[lo] + t * 1.0 * (ys[hi] - ys[lo])
# When we plot Hist, Pmf and Cdf objects, they don't appear in
# the legend unless we override the default label.
# matplotlib treats the label '_nolegend_' as "omit this artist from the legend".
DEFAULT_LABEL = '_nolegend_'
class _DictWrapper(object):
    """An object that contains a dictionary mapping values to freqs/probs.

    Shared private base class for Hist and Pmf.
    """

    def __init__(self, obj=None, label=None):
        """Initializes the distribution.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        label: string label
        """
        self.label = label if label is not None else DEFAULT_LABEL
        self.d = {}

        # flag whether the distribution is under a log transform
        self.log = False

        if obj is None:
            return

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.label = label if label is not None else obj.label

        if isinstance(obj, dict):
            self.d.update(obj.items())
        elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.d.update(obj.Items())
        elif isinstance(obj, pandas.Series):
            # Series.iteritems was deprecated and removed in pandas 2.0;
            # items() is the supported, behaviorally equivalent spelling.
            self.d.update(obj.value_counts().items())
        else:
            # finally, treat it like a list
            self.d.update(Counter(obj))

        if len(self) > 0 and isinstance(self, Pmf):
            self.Normalize()

    def __hash__(self):
        # identity-based hash so distributions can be used as dict keys
        return id(self)

    def __str__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s)' % (cls, str(self.d))
        else:
            return self.label

    def __repr__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s)' % (cls, repr(self.d))
        else:
            return '%s(%s, %s)' % (cls, repr(self.d), repr(self.label))

    def __eq__(self, other):
        try:
            return self.d == other.d
        except AttributeError:
            return False

    def __len__(self):
        return len(self.d)

    def __iter__(self):
        return iter(self.d)

    def iterkeys(self):
        """Returns an iterator over keys."""
        return iter(self.d)

    def __contains__(self, value):
        return value in self.d

    def __getitem__(self, value):
        return self.d.get(value, 0)

    def __setitem__(self, value, prob):
        self.d[value] = prob

    def __delitem__(self, value):
        del self.d[value]

    def Copy(self, label=None):
        """Returns a copy.

        Make a shallow copy of d.  If you want a deep copy of d,
        use copy.deepcopy on the whole object.

        label: string label for the new Hist

        returns: new _DictWrapper with the same type
        """
        new = copy.copy(self)
        new.d = copy.copy(self.d)
        new.label = label if label is not None else self.label
        return new

    def Scale(self, factor):
        """Multiplies the values by a factor.

        factor: what to multiply by

        Returns: new object
        """
        new = self.Copy()
        new.d.clear()

        for val, prob in self.Items():
            new.Set(val * factor, prob)
        return new

    def Log(self, m=None):
        """Log transforms the probabilities.

        Removes values with probability 0.

        Normalizes so that the largest logprob is 0.
        """
        if self.log:
            raise ValueError("Pmf/Hist already under a log transform")
        self.log = True

        if m is None:
            m = self.MaxLike()

        # iterate over a snapshot: Remove() mutates self.d, and mutating a
        # dict while iterating its view raises RuntimeError in Python 3
        for x, p in list(self.d.items()):
            if p:
                self.Set(x, math.log(p / m))
            else:
                self.Remove(x)

    def Exp(self, m=None):
        """Exponentiates the probabilities.

        m: how much to shift the ps before exponentiating

        If m is None, normalizes so that the largest prob is 1.
        """
        if not self.log:
            raise ValueError("Pmf/Hist not under a log transform")
        self.log = False

        if m is None:
            m = self.MaxLike()

        # Set() only rebinds existing keys, so iterating the live view is safe
        for x, p in self.d.items():
            self.Set(x, math.exp(p - m))

    def GetDict(self):
        """Gets the dictionary."""
        return self.d

    def SetDict(self, d):
        """Sets the dictionary."""
        self.d = d

    def Values(self):
        """Gets an unsorted sequence of values.

        Note: one source of confusion is that the keys of this
        dictionary are the values of the Hist/Pmf, and the
        values of the dictionary are frequencies/probabilities.
        """
        return self.d.keys()

    def Items(self):
        """Gets an unsorted sequence of (value, freq/prob) pairs."""
        return self.d.items()

    def SortedItems(self):
        """Gets a sorted sequence of (value, freq/prob) pairs.

        If items are unsortable, the result is unsorted.
        """
        def isnan(x):
            try:
                return math.isnan(x)
            except TypeError:
                return False

        if any([isnan(x) for x in self.Values()]):
            msg = 'Keys contain NaN, may not sort correctly.'
            logging.warning(msg)

        try:
            return sorted(self.d.items())
        except TypeError:
            return self.d.items()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        Note: options are ignored

        Returns:
            tuple of (sorted value sequence, freq/prob sequence)
        """
        return zip(*self.SortedItems())

    def MakeCdf(self, label=None):
        """Makes a Cdf."""
        label = label if label is not None else self.label
        return Cdf(self, label=label)

    def Print(self):
        """Prints the values and freqs/probs in ascending order."""
        for val, prob in self.SortedItems():
            print(val, prob)

    def Set(self, x, y=0):
        """Sets the freq/prob associated with the value x.

        Args:
            x: number value
            y: number freq or prob
        """
        self.d[x] = y

    def Incr(self, x, term=1):
        """Increments the freq/prob associated with the value x.

        Args:
            x: number value
            term: how much to increment by
        """
        self.d[x] = self.d.get(x, 0) + term

    def Mult(self, x, factor):
        """Scales the freq/prob associated with the value x.

        Args:
            x: number value
            factor: how much to multiply by
        """
        self.d[x] = self.d.get(x, 0) * factor

    def Remove(self, x):
        """Removes a value.

        Throws an exception if the value is not there.

        Args:
            x: value to remove
        """
        del self.d[x]

    def Total(self):
        """Returns the total of the frequencies/probabilities in the map."""
        return sum(self.d.values())

    def MaxLike(self):
        """Returns the largest frequency/probability in the map."""
        return max(self.d.values())

    def Largest(self, n=10):
        """Returns the largest n values, with frequency/probability.

        n: number of items to return
        """
        return sorted(self.d.items(), reverse=True)[:n]

    def Smallest(self, n=10):
        """Returns the smallest n values, with frequency/probability.

        n: number of items to return
        """
        return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
    """A histogram: maps each value to how many times it occurs.

    Values can be any hashable type; frequencies are integer counters.
    """

    def Freq(self, x):
        """Gets the frequency associated with the value x.

        Args:
            x: number value

        Returns:
            int frequency
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Gets frequencies for a sequence of values."""
        freqs = []
        for x in xs:
            freqs.append(self.Freq(x))
        return freqs

    def IsSubset(self, other):
        """Checks whether the values in this histogram are a subset of
        the values in the given histogram."""
        return all(freq <= other.Freq(val) for val, freq in self.Items())

    def Subtract(self, other):
        """Subtracts the values in the given histogram from this histogram."""
        for val, freq in other.Items():
            self.Incr(val, -freq)
class Pmf(_DictWrapper):
    """Represents a probability mass function.

    Values can be any hashable type; probabilities are floating-point.
    Pmfs are not necessarily normalized.
    """

    def Prob(self, x, default=0):
        """Gets the probability associated with the value x.

        Args:
            x: number value
            default: value to return if the key is not there

        Returns:
            float probability
        """
        return self.d.get(x, default)

    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]

    def Percentile(self, percentage):
        """Computes a percentile of a given Pmf.

        Note: this is not super efficient.  If you are planning
        to compute more than a few percentiles, compute the Cdf.

        percentage: float 0-100

        returns: value from the Pmf
        """
        p = percentage / 100
        total = 0
        for val, prob in sorted(self.Items()):
            total += prob
            if total >= p:
                return val

    def ProbGreater(self, x):
        """Probability that a sample from this Pmf exceeds x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbGreater(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val > x]
            return sum(t)

    def ProbLess(self, x):
        """Probability that a sample from this Pmf is less than x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbLess(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val < x]
            return sum(t)

    # NOTE: I've decided to remove the magic comparators because they
    # have the side-effect of making Pmf sortable, but in fact they
    # don't support sorting.

    def Normalize(self, fraction=1):
        """Normalizes this PMF so the sum of all probs is fraction.

        Args:
            fraction: what the total should be after normalization

        Returns: the total probability before normalizing
        """
        if self.log:
            raise ValueError("Normalize: Pmf is under a log transform")

        total = self.Total()
        if total == 0:
            raise ValueError('Normalize: total probability is zero.')

        factor = fraction / total
        for x in self.d:
            self.d[x] *= factor

        return total

    def Random(self):
        """Chooses a random element from this PMF.

        Note: this is not very efficient.  If you plan to call
        this more than a few times, consider converting to a CDF.

        Returns:
            float value from the Pmf
        """
        target = random.random()
        total = 0
        for x, p in self.d.items():
            total += p
            if total >= target:
                return x

        # we shouldn't get here
        raise ValueError('Random: Pmf might not be normalized.')

    def Mean(self):
        """Computes the mean of a PMF.

        Returns:
            float mean
        """
        return sum(p * x for x, p in self.Items())

    def Var(self, mu=None):
        """Computes the variance of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float variance
        """
        if mu is None:
            mu = self.Mean()

        return sum(p * (x - mu) ** 2 for x, p in self.Items())

    def Expect(self, func):
        """Computes the expectation of func(x).

        Returns:
            expectation
        """
        # builtin sum: passing a generator to np.sum has been deprecated
        # since NumPy 1.15 and just falls back to the builtin anyway
        return sum(p * func(x) for x, p in self.Items())

    def Std(self, mu=None):
        """Computes the standard deviation of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float standard deviation
        """
        var = self.Var(mu)
        return math.sqrt(var)

    def MAP(self):
        """Returns the value with the highest probability.

        Returns: value from the Pmf
        """
        _, val = max((prob, val) for val, prob in self.Items())
        return val

    # Calling this function MaximumLikelihood is potentially misleading,
    # since the highest posterior probability does not necessarily
    # correspond to the highest likelihood.  MAP, for maximum aposteori
    # probability, is better, but still potentially misleading because
    # we might apply it to a Pmf that is not a posterior distribution.
    # So I'm providing both names.
    MaximumLikelihood = MAP

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        cdf = self.MakeCdf()
        return cdf.CredibleInterval(percentage)

    def __add__(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.AddPmf(other)
        except AttributeError:
            return self.AddConstant(other)

    __radd__ = __add__

    def AddPmf(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 + v2, p1 * p2)
        return pmf

    def AddConstant(self, other):
        """Computes the Pmf of the sum a constant and values from self.

        other: a number

        returns: new Pmf
        """
        if other == 0:
            return self.Copy()

        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 + other, p1)
        return pmf

    def __sub__(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.SubPmf(other)
        except AttributeError:
            return self.AddConstant(-other)

    def SubPmf(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 - v2, p1 * p2)
        return pmf

    def __mul__(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.MulPmf(other)
        except AttributeError:
            return self.MulConstant(other)

    def MulPmf(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 * v2, p1 * p2)
        return pmf

    def MulConstant(self, other):
        """Computes the Pmf of the product of a constant and values from self.

        other: a number

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 * other, p1)
        return pmf

    def __div__(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.DivPmf(other)
        except AttributeError:
            return self.MulConstant(1 / other)

    __truediv__ = __div__

    def DivPmf(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 / v2, p1 * p2)
        return pmf

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.MakeCdf()
        return cdf.Max(k)
class Joint(Pmf):
    """Represents a joint distribution.

    The values are sequences (usually tuples)
    """

    def Marginal(self, i, label=None):
        """Gets the marginal distribution of the indicated variable.

        i: index of the variable we want

        Returns: Pmf
        """
        marginal = Pmf(label=label)
        for values, p in self.Items():
            marginal.Incr(values[i], p)
        return marginal

    def Conditional(self, i, j, val, label=None):
        """Gets the conditional distribution of the indicated variable.

        Distribution of vs[i], conditioned on vs[j] = val.

        i: index of the variable we want
        j: which variable is conditioned on
        val: the value the jth variable has to have

        Returns: Pmf
        """
        conditional = Pmf(label=label)
        for values, p in self.Items():
            if values[j] == val:
                conditional.Incr(values[i], p)
        conditional.Normalize()
        return conditional

    def MaxLikeInterval(self, percentage=90):
        """Returns the maximum-likelihood credible interval.

        If percentage=90, computes a 90% CI containing the values
        with the highest likelihoods.

        percentage: float between 0 and 100

        Returns: list of values from the suite
        """
        # highest-probability values first (ties broken by value, descending)
        ranked = sorted(((p, v) for v, p in self.Items()), reverse=True)
        interval = []
        cumulative = 0
        for p, v in ranked:
            interval.append(v)
            cumulative += p
            if cumulative >= percentage / 100:
                break
        return interval
def MakeJoint(pmf1, pmf2):
    """Builds the joint distribution of values from pmf1 and pmf2.

    Assumes that the PMFs represent independent random variables.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        Joint pmf of value pairs
    """
    joint = Joint()
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            joint[v1, v2] = p1 * p2
    return joint
def MakeHistFromList(t, label=None):
    """Builds a Hist by counting an unsorted sequence of values.

    Args:
        t: sequence of numbers
        label: string label for this histogram

    Returns:
        Hist object
    """
    return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
    """Builds a Hist from a map of values to frequencies.

    Args:
        d: dictionary that maps values to frequencies
        label: string label for this histogram

    Returns:
        Hist object
    """
    return Hist(d, label=label)
def MakePmfFromList(t, label=None):
    """Builds a PMF from an unsorted sequence of values.

    Args:
        t: sequence of numbers
        label: string label for this PMF

    Returns:
        Pmf object
    """
    return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
    """Builds a PMF from a map of values to probabilities.

    Args:
        d: dictionary that maps values to probabilities
        label: string label for this PMF

    Returns:
        Pmf object
    """
    return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
    """Builds a PMF from a sequence of value-probability pairs.

    Args:
        t: sequence of value-probability pairs
        label: string label for this PMF

    Returns:
        Pmf object
    """
    return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
    """Builds a normalized PMF from a Hist object.

    Args:
        hist: Hist object
        label: string label; defaults to the Hist's label

    Returns:
        Pmf object
    """
    return Pmf(hist, label=hist.label if label is None else label)
def MakeMixture(metapmf, label='mix'):
    """Builds a mixture distribution.

    Args:
        metapmf: Pmf that maps from Pmfs to probs.
        label: string label for the new Pmf.

    Returns: Pmf object.
    """
    mix = Pmf(label=label)
    for inner_pmf, outer_p in metapmf.Items():
        for x, inner_p in inner_pmf.Items():
            mix.Incr(x, outer_p * inner_p)
    return mix
def MakeUniformPmf(low, high, n):
    """Builds a uniform Pmf over n evenly spaced values.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    uniform = Pmf()
    for value in np.linspace(low, high, n):
        uniform[value] = 1
    uniform.Normalize()
    return uniform
class Cdf:
    """Represents a cumulative distribution function.

    Attributes:
        xs: sequence of values
        ps: sequence of probabilities
        label: string used as a graph label.
    """

    def __init__(self, obj=None, ps=None, label=None):
        """Initializes.

        If ps is provided, obj must be the corresponding list of values.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        ps: list of cumulative probabilities
        label: string label
        """
        self.label = label if label is not None else DEFAULT_LABEL

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            if not label:
                self.label = label if label is not None else obj.label

        if obj is None:
            # caller does not provide obj, make an empty Cdf
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            if ps is not None:
                logging.warning("Cdf: can't pass ps without also passing xs.")
            return
        else:
            # if the caller provides xs and ps, just store them
            if ps is not None:
                if isinstance(ps, str):
                    logging.warning("Cdf: ps can't be a string")

                self.xs = np.asarray(obj)
                self.ps = np.asarray(ps)
                return

        # caller has provided just obj, not ps
        if isinstance(obj, Cdf):
            self.xs = copy.copy(obj.xs)
            self.ps = copy.copy(obj.ps)
            return

        if isinstance(obj, _DictWrapper):
            dw = obj
        else:
            dw = Hist(obj)

        if len(dw) == 0:
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            return

        xs, freqs = zip(*sorted(dw.Items()))
        self.xs = np.asarray(xs)
        # builtin float: np.float was removed in NumPy 1.24 and is the
        # same 64-bit dtype
        self.ps = np.cumsum(freqs, dtype=float)
        self.ps /= self.ps[-1]

    def __str__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
        else:
            return self.label

    def __repr__(self):
        cls = self.__class__.__name__
        if self.label == DEFAULT_LABEL:
            return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
        else:
            return '%s(%s, %s, %s)' % (cls, str(self.xs), str(self.ps),
                                       repr(self.label))

    def __len__(self):
        return len(self.xs)

    def __getitem__(self, x):
        return self.Prob(x)

    def __setitem__(self, x, value):
        # accept the item-assignment arguments so `cdf[x] = p` raises the
        # intended UnimplementedMethodException instead of a TypeError
        raise UnimplementedMethodException()

    def __delitem__(self, x):
        # accept the key argument so `del cdf[x]` raises the intended
        # UnimplementedMethodException instead of a TypeError
        raise UnimplementedMethodException()

    def __eq__(self, other):
        return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)

    def Print(self):
        """Prints the values and freqs/probs in ascending order."""
        for val, prob in zip(self.xs, self.ps):
            print(val, prob)

    def Copy(self, label=None):
        """Returns a copy of this Cdf.

        label: string label for the new Cdf
        """
        if label is None:
            label = self.label
        return Cdf(list(self.xs), list(self.ps), label=label)

    def MakePmf(self, label=None):
        """Makes a Pmf."""
        if label is None:
            label = self.label
        return Pmf(self, label=label)

    def Items(self):
        """Returns a sorted sequence of (value, probability) pairs.

        Note: in Python3, returns an iterator.
        """
        a = self.ps
        b = np.roll(a, 1)
        b[0] = 0
        return zip(self.xs, a - b)

    def Shift(self, term):
        """Adds a term to the xs.

        term: how much to add
        """
        new = self.Copy()
        # don't use +=, or else an int array + float yields int array
        new.xs = new.xs + term
        return new

    def Scale(self, factor):
        """Multiplies the xs by a factor.

        factor: what to multiply by
        """
        new = self.Copy()
        # don't use *=, or else an int array * float yields int array
        new.xs = new.xs * factor
        return new

    def Prob(self, x):
        """Returns CDF(x), the probability that corresponds to value x.

        Args:
            x: number

        Returns:
            float probability
        """
        if x < self.xs[0]:
            return 0
        index = bisect.bisect(self.xs, x)
        p = self.ps[index - 1]
        return p

    def Probs(self, xs):
        """Gets probabilities for a sequence of values.

        xs: any sequence that can be converted to NumPy array

        returns: NumPy array of cumulative probabilities
        """
        xs = np.asarray(xs)
        index = np.searchsorted(self.xs, xs, side='right')
        ps = self.ps[index - 1]
        ps[xs < self.xs[0]] = 0
        return ps

    ProbArray = Probs

    def Value(self, p):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        Args:
            p: number in the range [0, 1]

        Returns:
            number value
        """
        if p < 0 or p > 1:
            raise ValueError('Probability p must be in range [0, 1]')

        index = bisect.bisect_left(self.ps, p)
        return self.xs[index]

    def Values(self, ps=None):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        If ps is not provided, returns all values.

        Args:
            ps: NumPy array of numbers in the range [0, 1]

        Returns:
            NumPy array of values
        """
        if ps is None:
            return self.xs

        ps = np.asarray(ps)
        if np.any(ps < 0) or np.any(ps > 1):
            raise ValueError('Probability p must be in range [0, 1]')

        index = np.searchsorted(self.ps, ps, side='left')
        return self.xs[index]

    ValueArray = Values

    def Percentile(self, p):
        """Returns the value that corresponds to percentile p.

        Args:
            p: number in the range [0, 100]

        Returns:
            number value
        """
        return self.Value(p / 100)

    def Percentiles(self, ps):
        """Returns the values that correspond to percentiles ps.

        Args:
            ps: numbers in the range [0, 100]

        Returns:
            array of values
        """
        ps = np.asarray(ps)
        return self.Values(ps / 100)

    def PercentileRank(self, x):
        """Returns the percentile rank of the value x.

        x: potential value in the CDF

        returns: percentile rank in the range 0 to 100
        """
        return self.Prob(x) * 100

    def PercentileRanks(self, xs):
        """Returns the percentile ranks of the values in xs.

        xs: potential values in the CDF

        returns: array of percentile ranks in the range 0 to 100
        """
        # BUG FIX: previously referenced undefined name `x` (NameError);
        # the parameter is `xs`
        return self.Probs(xs) * 100

    def Random(self):
        """Chooses a random value from this distribution."""
        return self.Value(random.random())

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int length of the sample
        returns: NumPy array
        """
        ps = np.random.random(n)
        return self.ValueArray(ps)

    def Mean(self):
        """Computes the mean of a CDF.

        Returns:
            float mean
        """
        old_p = 0
        total = 0
        for x, new_p in zip(self.xs, self.ps):
            p = new_p - old_p
            total += p * x
            old_p = new_p
        return total

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        prob = (1 - percentage / 100) / 2
        interval = self.Value(prob), self.Value(1 - prob)
        return interval

    ConfidenceInterval = CredibleInterval

    def _Round(self, multiplier=1000):
        """
        An entry is added to the cdf only if the percentile differs
        from the previous value in a significant digit, where the number
        of significant digits is determined by multiplier.  The
        default is 1000, which keeps log10(1000) = 3 significant digits.
        """
        # TODO(write this method)
        raise UnimplementedMethodException()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        An empirical CDF is a step function; linear interpolation
        can be misleading.

        Note: options are ignored

        Returns:
            tuple of (xs, ps)
        """
        def interleave(a, b):
            c = np.empty(a.shape[0] + b.shape[0])
            c[::2] = a
            c[1::2] = b
            return c

        a = np.array(self.xs)
        xs = interleave(a, a)
        shift_ps = np.roll(self.ps, 1)
        shift_ps[0] = 0
        ps = interleave(shift_ps, self.ps)
        return xs, ps

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.Copy()
        cdf.ps **= k
        return cdf
def MakeCdfFromItems(items, label=None):
    """Builds a Cdf from an unsorted sequence of (value, frequency) pairs.

    Args:
        items: unsorted sequence of (value, frequency) pairs
        label: string label for this CDF

    Returns:
        cdf: list of (value, fraction) pairs
    """
    return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
    """Builds a CDF from a dictionary that maps values to frequencies.

    Args:
        d: dictionary that maps values to frequencies.
        label: string label for the data.

    Returns:
        Cdf object
    """
    return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
    """Builds a CDF from an unsorted sequence.

    Args:
        seq: unsorted sequence of sortable values
        label: string label for the cdf

    Returns:
        Cdf object
    """
    return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
    """Builds a CDF from a Hist object.

    Args:
        hist: Pmf.Hist object
        label: string label; defaults to the Hist's label

    Returns:
        Cdf object
    """
    return Cdf(hist, label=hist.label if label is None else label)
def MakeCdfFromPmf(pmf, label=None):
    """Builds a CDF from a Pmf object.

    Args:
        pmf: Pmf.Pmf object
        label: string label; defaults to the Pmf's label

    Returns:
        Cdf object
    """
    return Cdf(pmf, label=pmf.label if label is None else label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
    """Represents a suite of hypotheses and their probabilities."""

    def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data

        returns: the normalizing constant
        """
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdate(self, data):
        """Updates a suite of hypotheses based on new data.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        Note: unlike Update, LogUpdate does not normalize.

        Args:
            data: any representation of the data
        """
        for hypo in self.Values():
            like = self.LogLikelihood(data, hypo)
            self.Incr(hypo, like)

    def UpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        This is more efficient than calling Update repeatedly because
        it waits until the end to Normalize.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: the normalizing constant
        """
        for data in dataset:
            for hypo in self.Values():
                like = self.Likelihood(data, hypo)
                self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: None
        """
        for data in dataset:
            self.LogUpdate(data)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def LogLikelihood(self, data, hypo):
        """Computes the log likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def Print(self):
        """Prints the hypotheses and their probabilities."""
        for hypo, prob in sorted(self.Items()):
            print(hypo, prob)

    def MakeOdds(self):
        """Transforms from probabilities to odds.

        Values with prob=0 are removed.
        """
        # Iterate over a snapshot: Remove mutates the underlying dict,
        # and deleting from a live dict view during iteration raises
        # RuntimeError in Python 3.
        for hypo, prob in list(self.Items()):
            if prob:
                self.Set(hypo, Odds(prob))
            else:
                self.Remove(hypo)

    def MakeProbs(self):
        """Transforms from odds to probabilities."""
        for hypo, odds in self.Items():
            self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
    """Represents a probability density function (PDF)."""

    def Density(self, x):
        """Evaluates this Pdf at x.

        Subclasses must override this.

        Returns: float or NumPy array of probability density
        """
        raise UnimplementedMethodException()

    def GetLinspace(self):
        """Get a linspace for plotting.

        Not all subclasses of Pdf implement this.

        Returns: numpy array
        """
        raise UnimplementedMethodException()

    def MakePmf(self, **options):
        """Makes a discrete version of this Pdf.

        options can include
        label: string
        low: low end of range
        high: high end of range
        n: number of places to evaluate

        Returns: new Pmf
        """
        label = options.pop('label', '')
        xs, ds = self.Render(**options)
        return Pmf(dict(zip(xs, ds)), label=label)

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        If options includes low and high, it must also include n;
        in that case the density is evaluated at n locations between
        low and high, including both.

        If options includes xs, the density is evaluated at those
        locations.  Otherwise, self.GetLinspace provides the locations.

        Returns:
            tuple of (xs, densities)
        """
        low = options.pop('low', None)
        high = options.pop('high', None)
        if low is None or high is None:
            xs = options.pop('xs', None)
            if xs is None:
                xs = self.GetLinspace()
        else:
            n = options.pop('n', 101)
            xs = np.linspace(low, high, n)
        return xs, self.Density(xs)

    def Items(self):
        """Generates a sequence of (value, probability) pairs.
        """
        xs, ds = self.Render()
        return zip(xs, ds)
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    # The two draws are independent, so sum the joint mass p1*p2 over
    # every pair (v1, v2) with v1 strictly below v2.
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 < v2)
def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    total = 0
    # Independent draws: accumulate the joint mass where v1 exceeds v2.
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 > v2:
                total += p1 * p2
    return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
    """Computes the PDF of the normal distribution.

    Note: stats.norm.pdf returns the properly normalized density;
    the previous docstring called it "unnormalized".

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
Returns the probabily of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
"""Evaluates the binomial PMF.
Returns the distribution of successes in n trials with probability p.
"""
pmf = Pmf()
for k in range(n+1):
pmf[k] = stats.binom.pmf(k, n, p)
return pmf
def EvalGeometricPmf(k, p, loc=0):
"""Evaluates the geometric PMF.
With loc=0: Probability of `k` trials to get one success.
With loc=-1: Probability of `k` trials before first success.
k: number of trials
p: probability of success on each trial
"""
return stats.geom.pmf(k, p, loc=loc)
def MakeGeometricPmf(p, loc=0, high=10):
    """Makes a PMF discrete approx to a geometric distribution.

    With loc=0: PMF of trials to get one success.
    With loc=-1: PMF of trials before first success.

    Note: the loop starts at k=0, which has zero mass when loc=0
    (the geometric support starts at 1); the result is renormalized
    over k in [0, high).

    p: probability of success
    loc: location parameter passed to stats.geom
    high: upper bound where PMF is truncated

    returns: normalized Pmf
    """
    pmf = Pmf()
    for k in range(high):
        pmf[k] = stats.geom.pmf(k, p, loc=loc)
    pmf.Normalize()
    return pmf
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
Returns the probabily of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
return stats.poisson.pmf(k, lam)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = stats.poisson.pmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalParetoPdf(x, xm, alpha):
"""Computes the Pareto.
xm: minimum value (scale parameter)
alpha: shape parameter
returns: float probability density
"""
return stats.pareto.pdf(x, alpha, scale=xm)
def MakeParetoPmf(xm, alpha, high, num=101):
    """Makes a PMF discrete approx to a Pareto distribution.

    xm: minimum value (scale parameter)
    alpha: shape parameter
    high: upper bound value
    num: number of values

    returns: Pmf (NOTE: unlike the other Make*Pmf helpers in this
        module, this one does not call Normalize, so the masses are
        raw density values)
    """
    xs = np.linspace(xm, high, num)
    ps = stats.pareto.pdf(xs, alpha, scale=xm)
    pmf = Pmf(dict(zip(xs, ps)))
    return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the normal distribution.

    Args:
        x: float

        mu: mean parameter

        sigma: standard deviation parameter

    Returns:
        float
    """
    # Delegate to a frozen scipy distribution object.
    dist = stats.norm(loc=mu, scale=sigma)
    return dist.cdf(x)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the lognormal distribution.

    The distribution is parameterized by the mean (mu) and standard
    deviation (sigma) of the underlying normal distribution.

    x: float or sequence
    mu: mean parameter
    sigma: standard deviation parameter

    Returns: float or sequence
    """
    # scipy's lognorm takes the shape parameter s=sigma and
    # scale=exp(mu).  The previous call passed only loc/scale and
    # omitted the mandatory shape argument, which raised TypeError,
    # and mapped mu/sigma onto the wrong parameters.
    return stats.lognorm.cdf(x, sigma, scale=math.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
    """Generates sequences of xs and ps for a Normal CDF.

    mu: parameter
    sigma: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    # Evaluate the Gaussian CDF on an evenly spaced grid.
    grid = np.linspace(low, high, n)
    return grid, stats.norm.cdf(grid, mu, sigma)
def RenderParetoCdf(xmin, alpha, low, high, n=50):
    """Generates sequences of xs and ps for a Pareto CDF.

    xmin: parameter
    alpha: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    # The CDF is zero below xmin, so clamp the lower bound.
    start = max(low, xmin)
    xs = np.linspace(start, high, n)
    ps = 1 - (xs / xmin) ** -alpha
    return xs, ps
class Beta:
    """Represents a Beta distribution.

    See http://en.wikipedia.org/wiki/Beta_distribution
    """
    def __init__(self, alpha=1, beta=1, label=None):
        """Initializes a Beta distribution.

        alpha: shape parameter
        beta: shape parameter
        label: string label for plotting
        """
        self.alpha = alpha
        self.beta = beta
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Beta distribution.

        data: pair of int (heads, tails)
        """
        heads, tails = data
        self.alpha += heads
        self.beta += tails

    def Mean(self):
        """Computes the mean of this distribution."""
        return self.alpha / (self.alpha + self.beta)

    def MAP(self):
        """Computes the value with maximum a posteriori probability.

        NOTE(review): divides by zero when alpha == beta == 1 (both
        a and b are 0); callers should ensure alpha + beta > 2.
        """
        a = self.alpha - 1
        b = self.beta - 1
        return a / (a + b)

    def Random(self):
        """Generates a random variate from this distribution."""
        return random.betavariate(self.alpha, self.beta)

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int sample size

        returns: NumPy array of n variates
        """
        size = n,
        return np.random.beta(self.alpha, self.beta, size)

    def EvalPdf(self, x):
        """Evaluates the PDF at x.

        Note: unnormalized; the 1/B(alpha, beta) constant is omitted.
        """
        return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)

    def MakePmf(self, steps=101, label=None):
        """Returns a Pmf of this distribution.

        Note: Normally, we just evaluate the PDF at a sequence
        of points and treat the probability density as a probability
        mass.

        But if alpha or beta is less than one, we have to be
        more careful because the PDF goes to infinity at x=0
        and x=1.  In that case we evaluate the CDF and compute
        differences.

        The result is a little funny, because the values at 0 and 1
        are not symmetric.  Nevertheless, it is a reasonable discrete
        model of the continuous distribution, and behaves well as
        the number of values increases.

        steps: number of evaluation points
        label: string label (defaults to self.label)
        """
        if label is None and self.label is not None:
            label = self.label

        if self.alpha < 1 or self.beta < 1:
            # density diverges at the edges; go through the CDF instead
            cdf = self.MakeCdf()
            pmf = cdf.MakePmf()
            return pmf

        xs = [i / (steps - 1.0) for i in range(steps)]
        probs = [self.EvalPdf(x) for x in xs]
        pmf = Pmf(dict(zip(xs, probs)), label=label)
        return pmf

    def MakeCdf(self, steps=101):
        """Returns the CDF of this distribution.

        steps: number of evaluation points
        """
        xs = [i / (steps - 1.0) for i in range(steps)]
        # regularized incomplete beta function is the Beta CDF
        ps = special.betainc(self.alpha, self.beta, xs)
        cdf = Cdf(xs, ps)
        return cdf

    def Percentile(self, ps):
        """Returns the given percentiles from this distribution.

        ps: scalar, array, or list of [0-100]
        """
        ps = np.asarray(ps) / 100
        xs = special.betaincinv(self.alpha, self.beta, ps)
        return xs
class Dirichlet(object):
    """Represents a Dirichlet distribution.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    """

    def __init__(self, n, conc=1, label=None):
        """Initializes a Dirichlet distribution.

        n: number of dimensions
        conc: concentration parameter (smaller yields more concentration)
        label: string label

        Raises ValueError if n < 2.
        """
        if n < 2:
            raise ValueError('A Dirichlet distribution with '
                             'n<2 makes no sense')

        self.n = n
        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent dtype.
        self.params = np.ones(n, dtype=float) * conc
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Dirichlet distribution.

        data: sequence of observations, in order corresponding to params
        """
        m = len(data)
        self.params[:m] += data

    def Random(self):
        """Generates a random variate from this distribution.

        Returns: normalized vector of fractions
        """
        p = np.random.gamma(self.params)
        return p / p.sum()

    def Likelihood(self, data):
        """Computes the likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float probability (0 if data has more entries than params)
        """
        m = len(data)
        if self.n < m:
            return 0

        x = data
        p = self.Random()
        q = p[:m] ** x
        return q.prod()

    def LogLikelihood(self, data):
        """Computes the log likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float log probability (-inf if data has more entries
        than params)
        """
        m = len(data)
        if self.n < m:
            return float('-inf')

        x = self.Random()
        y = np.log(x[:m]) * data
        return y.sum()

    def MarginalBeta(self, i):
        """Computes the marginal distribution of the ith element.

        See http://en.wikipedia.org/wiki/Dirichlet_distribution
        #Marginal_distributions

        i: int

        Returns: Beta object
        """
        alpha0 = self.params.sum()
        alpha = self.params[i]
        return Beta(alpha, alpha0 - alpha)

    def PredictivePmf(self, xs, label=None):
        """Makes a predictive distribution.

        xs: values to go into the Pmf

        Returns: Pmf that maps from x to the mean prevalence of x
        """
        alpha0 = self.params.sum()
        ps = self.params / alpha0
        return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
    """Compute the binomial coefficient "n choose k".

    n: number of trials
    k: number of successes

    Returns: float
    """
    # scipy.misc.comb was removed from SciPy; scipy.special.comb is
    # the supported equivalent (the `special` module is already used
    # by Beta.MakeCdf in this module).
    return special.comb(n, k)
def LogBinomialCoef(n, k):
    """Computes the log of the binomial coefficient.

    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    # log C(n, 0) == log C(n, n) == 0; the approximation below would
    # otherwise evaluate log(0) and raise ValueError.
    if k == 0 or k == n:
        return 0.0
    return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
    """Jitters the values by adding random Gaussian noise.

    Note: despite the original docstring, the noise is drawn from a
    normal distribution with standard deviation `jitter`, not a
    uniform distribution.

    values: sequence
    jitter: scalar standard deviation of the noise

    returns: new numpy array
    """
    n = len(values)
    return np.random.normal(0, jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: option known mean
    ddof: delta degrees of freedom

    returns: float
    """
    data = np.asarray(xs)
    center = data.mean() if mu is None else mu
    deviations = data - center
    # Sum of squared deviations over (n - ddof).
    return np.dot(deviations, deviations) / (len(data) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: option known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
    """Computes mean and variance.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    data = np.asarray(xs)
    mean = data.mean()
    # Inlined Var(data, mean, ddof): the mean is reused so the data
    # is centered only once.
    deviations = data - mean
    s2 = np.dot(deviations, deviations) / (len(data) - ddof)
    return mean, s2
def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        sequence of values
    """
    n = int(p * len(t))
    t = sorted(t)
    # Slice with an explicit upper bound: the original `t[n:-n]`
    # returned an empty list whenever n == 0, because t[0:-0] is
    # t[0:0].
    return t[n:len(t) - n]
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    n1, n2 = len(group1), len(group2)
    # Pool the variances, weighting each group by its size.
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    mean_diff = group1.mean() - group2.mean()
    return mean_diff / math.sqrt(pooled_var)
def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values
        meanx: optional float mean of xs
        meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    xarr = np.asarray(xs)
    yarr = np.asarray(ys)

    if meanx is None:
        meanx = np.mean(xarr)
    if meany is None:
        meany = np.mean(yarr)

    # Population covariance: mean of the product of deviations.
    return np.dot(xarr - meanx, yarr - meany) / len(xarr)
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
    """Returns a list of ranks corresponding to the elements in t.

    Args:
        t: sequence of numbers

    Returns:
        list of integer ranks, starting at 1
    """
    # Order the original indices by their values (stable, so ties keep
    # index order), then assign each index its 1-based position in
    # that ordering.
    order = sorted(range(len(t)), key=lambda i: t[i])
    ranks = [0] * len(t)
    for position, index in enumerate(order, start=1):
        ranks[index] = position
    return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
    """Fits a line to the given data.

    xs: sequence of x
    inter: float intercept
    slope: float slope

    returns: tuple of numpy arrays (sorted xs, fit ys)
    """
    sorted_xs = np.sort(xs)
    # Evaluate y = inter + slope * x at each sorted x.
    line_ys = slope * sorted_xs + inter
    return sorted_xs, line_ys
def Residuals(xs, ys, inter, slope):
    """Computes residuals for a linear fit with parameters inter and slope.

    Args:
        xs: independent variable
        ys: dependent variable
        inter: float intercept
        slope: float slope

    Returns:
        array of residuals (observed - predicted)
    """
    predicted = inter + slope * np.asarray(xs)
    return np.asarray(ys) - predicted
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame with 'start', 'end' and 'name' columns
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of [start, end] index pairs
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int; np.int was
        # removed in NumPy 1.24, so use the builtin int dtype
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename
        options: keyword args passed through to pandas.read_fwf

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df
def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    with open(dct_file, **options) as f:
        for line in f:
            # only lines containing `_column(N)` declare a variable
            match = re.search( r'_column\(([^)]*)\)', line)
            if not match:
                continue
            start = int(match.group(1))
            t = line.split()
            # expected layout: _column(N) <type> <name> <fstring> "<desc>"
            vtype, name, fstring = t[1:4]
            name = name.lower()
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            long_desc = ' '.join(t[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column
    variables['end'] = variables.start.shift(-1)
    # NOTE(review): the last variable's end is set to 0 rather than the
    # true record width -- looks intentional as a sentinel; confirm
    # against how FixedWidthVariables/read_fwf consume it.
    variables.loc[len(variables)-1, 'end'] = 0

    dct = FixedWidthVariables(variables, index_base=1)
    return dct
def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    size = len(xs) if n is None else n
    # Bootstrap draw: sampling with replacement.
    return np.random.choice(xs, size, replace=True)
def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    # Draw index labels, then select the matching rows.
    chosen = np.random.choice(df.index, nrows, replace=replace)
    return df.loc[chosen]
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    weights = df[column]
    # NOTE(review): dict(weights) maps index label -> weight, so rows
    # sharing an index label collapse to a single entry -- assumes the
    # DataFrame has a unique index; verify for the intended callers.
    cdf = Cdf(dict(weights))
    indices = cdf.Sample(len(weights))
    sample = df.loc[indices]
    return sample
def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.

    array: NumPy array sorted along axis 0
    p: float 0--100

    returns: NumPy array (one row)
    """
    rows, cols = array.shape
    # Clamp so p == 100 maps to the last row instead of indexing one
    # past the end.
    index = min(int(rows * p / 100), rows - 1)
    return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
def Smooth(xs, sigma=2, **options):
    """Smooths a NumPy array with a Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    options: passed along to gaussian_filter1d

    returns: NumPy array of smoothed values
    """
    # The nested scipy.ndimage.filters namespace was deprecated and
    # later removed; the function is exposed directly on scipy.ndimage.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
    """Represents a hypothesis test."""

    def __init__(self, data):
        """Initializes.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        # test statistic of the observed data, computed once up front
        self.actual = self.TestStatistic(data)
        self.test_stats = None
        self.test_cdf = None

    def PValue(self, iters=1000):
        """Computes the distribution of the test statistic and p-value.

        iters: number of iterations

        returns: float p-value
        """
        self.test_stats = [self.TestStatistic(self.RunModel())
                           for _ in range(iters)]
        self.test_cdf = Cdf(self.test_stats)

        # one-sided p-value: fraction of simulated statistics at least
        # as extreme as the observed one
        count = sum(1 for x in self.test_stats if x >= self.actual)
        return count / iters

    def MaxTestStat(self):
        """Returns the largest test statistic seen during simulations.

        Requires PValue to have been called first (test_stats is None
        until then).
        """
        return max(self.test_stats)

    def PlotCdf(self, label=None):
        """Draws a Cdf with vertical lines at the observed test stat.

        label: string label for the Cdf curve
        """
        def VertLine(x):
            """Draws a vertical line at x."""
            thinkplot.Plot([x, x], [0, 1], color='0.8')

        VertLine(self.actual)
        thinkplot.Cdf(self.test_cdf, label=label)

    def TestStatistic(self, data):
        """Computes the test statistic.

        Subclasses must override this.

        data: data in whatever form is relevant
        """
        raise UnimplementedMethodException()

    def MakeModel(self):
        """Build a model of the null hypothesis.

        Default is a no-op; subclasses may override.
        """
        pass

    def RunModel(self):
        """Run the model of the null hypothesis.

        Subclasses must override this.

        returns: simulated data
        """
        raise UnimplementedMethodException()
def main():
pass
if __name__ == '__main__':
main()
|
santosfamilyfoundation/SantosCloud
|
app/traffic_cloud_utils/plotting/thinkstats2.py
|
Python
|
mit
| 73,337
|
[
"Gaussian"
] |
a3781161afc124b415da613e5984111f9e71b97deea37f56ee6b4b0755e5ac79
|
import ast
from nose.tools import raises
from jaspyx.ast_util import ast_store, ast_load, ast_call
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestAssignSingleTarget(V8Helper):
    """Tests for jaspyx code generation of ``ast.Assign`` nodes.

    Each test hand-builds a Python AST, compiles it to JavaScript and runs
    it in a V8 context (via V8Helper.run), then checks the resulting value.
    """

    def test_single_assign(self):
        # Simple case: test = 'test'.
        assert self.run(
            [
                ast.Assign(
                    [ast_store('test')],
                    ast.Str('test'))
            ],
            'test'
        ) == 'test'

    def test_multi_assign(self):
        # One value assigned to two targets: test1 = test2 = 'test'.
        assert self.run(
            [
                ast.Assign(
                    [
                        ast_store('test1'),
                        ast_store('test2')
                    ],
                    ast.Str('test')
                )
            ],
            'test1 + "+" + test2'
        ) == 'test+test'

    @raises(Exception)
    def test_assign_multiple_slice(self):
        # Multiple targets where one is a slice subscript must raise.
        self.v.visit(
            ast.Assign(
                [
                    ast.Subscript(
                        ast_load('foo'),
                        ast.Slice(),
                        ast.Store()
                    ),
                    ast_store('bar'),
                ],
                ast.Str('test')
            )
        )

    def _slice_assign(self, start, end):
        """Assign [42, 43] into test[start:end] and return the resulting list.

        A start/end of None leaves that bound open (note: 0 is also falsy
        here, but the tests never pass 0).
        """
        result = self.run(
            [
                ast.Assign(
                    [ast_store('test')],
                    ast.List([ast.Num(x) for x in range(10)], ast.Load())
                ),
                ast.Assign(
                    [
                        ast.Subscript(
                            ast_load('test'),
                            ast.Slice(start and ast.Num(start), end and ast.Num(end), None),
                            ast.Store()
                        ),
                    ],
                    ast.List([ast.Num(42), ast.Num(43)], ast.Load())
                ),
            ],
            'test',
            list
        )
        return result

    def test_assign_slice_full(self):
        assert self._slice_assign(None, None) == [42, 43]

    def test_assign_slice_start(self):
        assert self._slice_assign(5, None) == [0, 1, 2, 3, 4, 42, 43]

    def test_assign_slice_neg_start(self):
        assert self._slice_assign(-6, None) == [0, 1, 2, 3, 42, 43]

    def test_assign_slice_end(self):
        assert self._slice_assign(None, 5) == [42, 43, 5, 6, 7, 8, 9]

    def test_assign_slice_neg_end(self):
        assert self._slice_assign(None, -1) == [42, 43, 9]

    def test_assign_slice_start_end(self):
        assert self._slice_assign(2, 8) == [0, 1, 42, 43, 8, 9]

    def test_assign_slice_neg_start_end(self):
        assert self._slice_assign(-8, 8) == [0, 1, 42, 43, 8, 9]

    def test_assign_slice_neg_start_neg_end(self):
        assert self._slice_assign(-8, -2) == [0, 1, 42, 43, 8, 9]

    def test_assign_expr_slice(self):
        # Slice-assign through a call result: f_test()[2:8] = [42, 43] must
        # mutate the underlying list that f_test returns.
        assert self.run(
            [
                ast.Assign(
                    [ast_store('test')],
                    ast.List([ast.Num(x) for x in range(10)], ast.Load())
                ),
                ast.FunctionDef(
                    'f_test',
                    ast.arguments([], None, None, []),
                    [
                        ast.Return(ast_load('test')),
                    ],
                    []
                ),
                ast.Assign(
                    [
                        ast.Subscript(
                            ast_call(ast_load('f_test')),
                            ast.Slice(ast.Num(2), ast.Num(8), None),
                            ast.Store()
                        ),
                    ],
                    ast.List([ast.Num(42), ast.Num(43)], ast.Load())
                ),
            ],
            'test',
            list
        ) == [0, 1, 42, 43, 8, 9]

    def test_destructure(self):
        # List-destructuring assignment inside an anonymous function:
        # [test1, test2] = ['test1', 'test2'], with test1 declared global.
        assert self.run(
            [
                ast.Assign(
                    [ast_store('test2')],
                    ast_call(
                        ast.FunctionDef(
                            '',
                            ast.arguments([], None, None, []),
                            [
                                ast.Global(['test1']),
                                ast.Assign(
                                    [
                                        ast.List(
                                            [
                                                ast_store('test1'),
                                                ast_store('test2'),
                                            ],
                                            ast.Store()
                                        )
                                    ],
                                    ast.List(
                                        [
                                            ast.Str('test1'),
                                            ast.Str('test2'),
                                        ],
                                        ast.Load()
                                    )
                                ),
                                ast.Return(ast_load('test2'))
                            ],
                            []
                        )
                    )
                )
            ],
            'test1 + "+" + test2'
        ) == 'test1+test2'
|
iksteen/jaspyx
|
jaspyx/tests/visitor/test_assign.py
|
Python
|
mit
| 5,319
|
[
"VisIt"
] |
140f687f08fdddeec77f9e11845039d1bb701b39ac55044681b64bdf9bccbdc5
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(ebrevdo): Use asserts contrib module when ready
def _assert_all_positive(x):
    """Returns an Assert op checking that every element of `x` is > 0."""
    all_positive = math_ops.reduce_all(x > 0)
    data = ["Tensor %s should contain only positive values: " % x.name, x]
    return logging_ops.Assert(all_positive, data)
class Gaussian(object):
  """The scalar Gaussian distribution with mean and stddev parameters mu, sigma.

  #### Mathematical details

  The PDF of this distribution is:

  ```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar Gaussian distribution.
  dist = tf.contrib.distributions.Gaussian(mu=0, sigma=3)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1)

  # Define a batch of two scalar valued Gaussians.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tf.contrib.distributions.Gaussian(mu=[1, 2.], sigma=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.pdf([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample(3)
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Gaussians.
  # Both have mean 1, but different standard deviations.
  dist = tf.contrib.distributions.Gaussian(mu=1, sigma=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.pdf(3.0)
  ```
  """

  def __init__(self, mu, sigma, name=None):
    """Construct Gaussian distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    with ops.op_scope([mu, sigma], name, "Gaussian"):
      mu = ops.convert_to_tensor(mu)
      sigma = ops.convert_to_tensor(sigma)
      # The identity ops are gated on the positivity assert, so any use of
      # self._mu / self._sigma runs the runtime check on sigma first.
      with ops.control_dependencies([_assert_all_positive(sigma)]):
        self._mu = array_ops.identity(mu, name="mu")
        self._sigma = array_ops.identity(sigma, name="sigma")
        contrib_tensor_util.assert_same_float_dtype((mu, sigma))

  @property
  def dtype(self):
    # mu and sigma are checked to share a dtype in __init__.
    return self._mu.dtype

  @property
  def mu(self):
    return self._mu

  @property
  def sigma(self):
    return self._sigma

  @property
  def mean(self):
    # Multiply by ones_like(sigma) so the result has the full broadcast
    # shape of (mu, sigma), not just mu's shape.
    return self._mu * array_ops.ones_like(self._sigma)

  def log_pdf(self, x, name=None):
    """Log pdf of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogPdf"):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
      # log f(x) = -0.5 log(2 pi) - log(sigma) - 0.5 ((x - mu)/sigma)^2
      return (-0.5*log_2_pi - math_ops.log(self._sigma)
              -0.5*math_ops.square((x - self._mu) / self._sigma))

  def cdf(self, x, name=None):
    """CDF of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      cdf: tensor of dtype `dtype`, the CDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianCdf"):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      # CDF(x) = 0.5 (1 + erf((x - mu) / (sigma sqrt(2))))
      return (0.5 + 0.5*math_ops.erf(
          1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))

  def log_cdf(self, x, name=None):
    """Log CDF of observations `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogCdf"):
      return math_ops.log(self.cdf(x))

  def pdf(self, x, name=None):
    """The PDF of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      pdf: tensor of dtype `dtype`, the pdf values of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianPdf"):
      return math_ops.exp(self.log_pdf(x))

  def entropy(self, name=None):
    """The entropy of Gaussian distribution(s).

    Args:
      name: The name to give this op.

    Returns:
      entropy: tensor of dtype `dtype`, the entropy.
    """
    with ops.op_scope([self._mu, self._sigma], name, "GaussianEntropy"):
      two_pi_e1 = constant_op.constant(
          2 * math.pi * math.exp(1), dtype=self.dtype)
      # Use broadcasting rules to calculate the full broadcast sigma.
      sigma = self._sigma * array_ops.ones_like(self._mu)
      # H = 0.5 log(2 pi e sigma^2)
      return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))

  def sample(self, n, seed=None, name=None):
    """Sample `n` observations from the Gaussian Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.op_scope([self._mu, self._sigma, n], name, "GaussianSample"):
      broadcast_shape = (self._mu + self._sigma).get_shape()
      n = ops.convert_to_tensor(n)
      shape = array_ops.concat(
          0, [array_ops.pack([n]), array_ops.shape(self.mean)])
      # Sample standard normals, then shift/scale: x = sigma*z + mu.
      sampled = random_ops.random_normal(
          shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
      # Provide some hints to shape inference
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      sampled.set_shape(final_shape)
      return sampled * self._sigma + self._mu
|
petewarden/tensorflow_makefile
|
tensorflow/contrib/distributions/python/ops/gaussian.py
|
Python
|
apache-2.0
| 8,018
|
[
"Gaussian"
] |
74285a655bd180ac26a36e653f43706717f68e75ffaef4e21ba952d92a574801
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAcgh(RPackage):
    """Functions for reading aCGH data from image analysis output files
    and clone information files, creation of aCGH S3 objects for storing
    these data. Basic methods for accessing/replacing, subsetting,
    printing and plotting aCGH objects."""

    homepage = "https://www.bioconductor.org/packages/aCGH/"
    # Fetched from the Bioconductor git mirror at a pinned commit rather
    # than from a release tarball.
    url = "https://git.bioconductor.org/packages/aCGH"

    version('1.54.0', git='https://git.bioconductor.org/packages/aCGH', commit='be2ed339449f55c8d218e10c435e4ad356683693')

    # aCGH 1.54.0 is constrained to the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.54.0')
    depends_on('r-cluster', type=('build', 'run'))
    depends_on('r-survival', type=('build', 'run'))
    depends_on('r-multtest', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-acgh/package.py
|
Python
|
lgpl-2.1
| 2,036
|
[
"Bioconductor"
] |
95bc130b8885f2510a0212d70099c59994a21ae91b71d6cf21bd561ed7412207
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common settings and connection objects for DigitalOcean Cloud
"""
from libcloud.utils.py3 import httplib, parse_qs, urlparse
from libcloud.common.base import BaseDriver
from libcloud.common.base import ConnectionKey
from libcloud.common.base import JsonResponse
from libcloud.common.types import LibcloudError, InvalidCredsError
__all__ = [
'DigitalOcean_v2_Response',
'DigitalOcean_v2_Connection',
'DigitalOceanBaseDriver'
]
class DigitalOcean_v1_Error(LibcloudError):
    """
    Exception for when attempting to use version 1
    of the DigitalOcean API which is no longer
    supported.
    """

    def __init__(self,
                 value=('Driver no longer supported: Version 1 of the '
                        'DigitalOcean API reached end of life on November 9, '
                        '2015. Use the v2 driver. Please visit: '
                        'https://developers.digitalocean.com/documentation/changelog/api-v1/sunsetting-api-v1/'),  # noqa: E501
                 driver=None):
        # The default message explains the v1 sunset; callers normally raise
        # this exception without arguments.
        super(DigitalOcean_v1_Error, self).__init__(value, driver=driver)
class DigitalOcean_v2_Response(JsonResponse):
    """
    JSON response wrapper for the DigitalOcean v2 API.
    """
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        """
        Parse an error response.

        :raises: :class:`InvalidCredsError` when the API returns 401.
        :return: formatted error string, or the raw parsed body when no
                 ``message`` field is present.
        """
        # Both the 401 path and the generic path need the parsed body, so
        # parse it once up front instead of once per branch.
        body = self.parse_body()
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(body['message'])
        if 'message' in body:
            return '%s (code: %s)' % (body['message'], self.status)
        return body

    def success(self):
        return self.status in self.valid_response_codes
class DigitalOcean_v2_Connection(ConnectionKey):
    """
    Connection class for the DigitalOcean (v2) driver.
    """
    host = 'api.digitalocean.com'
    responseCls = DigitalOcean_v2_Response

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request

        This method adds ``token`` to the request.
        """
        headers.update({
            'Authorization': 'Bearer %s' % (self.key),
            'Content-Type': 'application/json',
        })
        return headers

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``per_page`` to the request to reduce the total
        number of paginated requests to the API.
        """
        # pylint: disable=maybe-no-member
        params['per_page'] = self.driver.ex_per_page
        return params
class DigitalOceanConnection(DigitalOcean_v2_Connection):
    """
    Connection class for the DigitalOcean driver.
    """
    # Alias kept for callers that use the un-versioned name; identical to
    # the v2 connection.
    pass
class DigitalOceanResponse(DigitalOcean_v2_Response):
    """Un-versioned alias for the v2 response class."""
    pass
class DigitalOceanBaseDriver(BaseDriver):
    """
    DigitalOcean BaseDriver

    Instantiating this class dispatches to the API-version-specific
    subclass via ``__new__``.
    """
    name = 'DigitalOcean'
    website = 'https://www.digitalocean.com'

    def __new__(cls, key, secret=None, api_version='v2', **kwargs):
        if cls is DigitalOceanBaseDriver:
            # A non-None secret implies the old id/key auth of API v1,
            # which is no longer supported.
            if api_version == 'v1' or secret is not None:
                raise DigitalOcean_v1_Error()
            elif api_version == 'v2':
                cls = DigitalOcean_v2_BaseDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(DigitalOceanBaseDriver, cls).__new__(cls, **kwargs)

    # The methods below define the common interface; version-specific
    # subclasses are expected to override them.
    def ex_account_info(self):
        raise NotImplementedError(
            'ex_account_info not implemented for this driver')

    def ex_list_events(self):
        raise NotImplementedError(
            'ex_list_events not implemented for this driver')

    def ex_get_event(self, event_id):
        raise NotImplementedError(
            'ex_get_event not implemented for this driver')

    def _paginated_request(self, url, obj):
        raise NotImplementedError(
            '_paginated_requests not implemented for this driver')
class DigitalOcean_v2_BaseDriver(DigitalOceanBaseDriver):
    """
    DigitalOcean BaseDriver using v2 of the API.

    Supports `ex_per_page` ``int`` value keyword parameter to adjust per page
    requests against the API.
    """
    connectionCls = DigitalOcean_v2_Connection

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=None, ex_per_page=200, **kwargs):
        # ex_per_page is read by the connection's add_default_params.
        self.ex_per_page = ex_per_page
        super(DigitalOcean_v2_BaseDriver, self).__init__(key, **kwargs)

    def ex_account_info(self):
        return self.connection.request('/v2/account').object['account']

    def ex_list_events(self):
        return self._paginated_request('/v2/actions', 'actions')

    def ex_get_event(self, event_id):
        """
        Get an event object

        :param event_id: Event id (required)
        :type event_id: ``str``
        """
        params = {}
        return self.connection.request('/v2/actions/%s' % event_id,
                                       params=params).object['action']

    def _paginated_request(self, url, obj):
        """
        Perform multiple calls in order to have a full list of elements when
        the API responses are paginated.

        :param url: API endpoint
        :type url: ``str``

        :param obj: Result object key
        :type obj: ``str``

        :return: ``list`` of API response objects
        :rtype: ``list``
        """
        params = {}
        data = self.connection.request(url)
        try:
            # If 'links.pages.last' is absent the KeyError below means the
            # response is not paginated and the single page is returned.
            query = urlparse.urlparse(data.object['links']['pages']['last'])
            # The query[4] references the query parameters from the url
            pages = parse_qs(query[4])['page'][0]
            values = data.object[obj]
            for page in range(2, int(pages) + 1):
                params.update({'page': page})
                new_data = self.connection.request(url, params=params)
                more_values = new_data.object[obj]
                for value in more_values:
                    values.append(value)
            data = values
        except KeyError:  # No pages.
            data = data.object[obj]
        return data
|
Kami/libcloud
|
libcloud/common/digitalocean.py
|
Python
|
apache-2.0
| 7,037
|
[
"VisIt"
] |
724810bf9017048f74a8d02528002e4938937ec5b8f743989876947bafda32c1
|
"""This module provides tools for finding the best fit object model by marginalising
over the space of possible PSFs.
For LkCa15 testing, see ./marginalise_image.py.
"""
from __future__ import print_function, division
import mdp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pdb
from scipy.spatial import Delaunay
from scipy.interpolate import RectBivariateSpline
import scipy.ndimage as nd
import astropy.io.fits as pyfits
import scipy.optimize as op
import opticstools as ot
import emcee
import multiprocessing
from scipy.ndimage import zoom
#!!! Removed pi from the following two functions.
def optimize_tilt_derivative(p, mod_ft, im_ft, uv):
    """Helper function for optimize_tilt: Jacobian of the tilted model.

    Parameters
    ----------
    p: numpy array (2): [xtilt, ytilt]
        The wavefront tilt in image-plane pixels.
    mod_ft: array
        Model Fourier transform.
    im_ft: array
        Image Fourier transform (unused; present for the leastsq Dfun
        call signature).
    uv: array
        Sampling points for mod_ft and im_ft.

    Returns
    -------
    Array of shape (2, 2*npoints) for leastsq's col_deriv=True convention:
    derivatives of the real-stacked model with respect to the two tilts.
    """
    # The model is mod_ft * exp(1j*(p[0]*uv[0] + p[1]*uv[1])) (see
    # optimize_tilt_function), so BOTH partial derivatives share the same
    # phase factor. The original code used p[0]*uv[1] in the second term,
    # which was a typo.
    phase = np.exp(1j*(p[0]*uv[0] + p[1]*uv[1]))
    complex_jac = [1j*uv[0]*mod_ft*phase,
                   1j*uv[1]*mod_ft*phase]
    retarray = np.array([complex_jac[0].real, complex_jac[0].imag,
                         complex_jac[1].real, complex_jac[1].imag])
    return retarray.reshape( (2,2*np.prod(uv[0].shape)) )
def optimize_tilt_function(p, mod_ft, im_ft, uv, return_model=False):
    """Helper function for optimize_tilt, used as the residual function
    for leastsq.

    Parameters
    ----------
    p: numpy array (2): [xtilt,ytilt]
        The wavefront tilt image-plane pixels.
    mod_ft: array
        Model Fourier transform that we want to fit to the image
    im_ft: array
        Image Fourier transform
    uv: array
        Sampling points for mod_ft and im_ft
    return_model: bool
        If True, return the tilted model instead of the residuals.

    Returns
    -------
    resid: numpy array
        Real and imaginary residuals stacked into one flat array (or the
        tilted complex model when return_model is True).
    """
    tilted_model = mod_ft * np.exp(1j * (p[0] * uv[0] + p[1] * uv[1]))
    if return_model:
        return tilted_model
    resid = im_ft - tilted_model
    return np.append(resid.real.flatten(), resid.imag.flatten())
def optimize_tilt(mod_ft, im_ft, uv, scale_flux=False, check_fit=False):
    """Find the sub-pixel tilt that best matches the model to the image.

    The fit is done in the Fourier domain rather than the image plane,
    because sub-pixel shifts are exact there: a shift is a convolution with
    a delta function, i.e. a multiplication of the Fourier transform by a
    phase ramp (the shift kernel).

    Parameters
    ----------
    mod_ft, im_ft: numpy array
        Model and image Fourier transforms sampled at the points uv.
    uv: float array
        Cycles per pixel times 2 np.pi.
    scale_flux: bool
        If True, normalise the image FT by its zero-frequency term first.
    check_fit: bool
        Drop into the debugger after the fit for inspection.

    Returns
    -------
    The best-fit [xtilt, ytilt]."""
    target_ft = im_ft / im_ft[0] if scale_flux else im_ft
    retvals = op.leastsq(optimize_tilt_function, [0, 0],
                         args=(mod_ft, target_ft, uv),
                         xtol=1e-4, ftol=1e-7)
    if check_fit:
        pdb.set_trace()
    if retvals[1] <= 0:
        print("Error in finding tilts!")
        raise UserWarning
    return retvals[0]
def prepare_im(im, ref_ft, uv, sampled_uv, corner_pix, center_ft = True, scale = 1.0):
    """A helper function to shift an image, optimize its tilt and subtract the
    background.

    Parameters
    ----------
    im: numpy array
        Image that we want to prepare.
    ref_ft: numpy array
        Fourier transform of the reference image, that defines a "centered" position.
    uv, sampled_uv: numpy array
        The (u,v) coordinates and pixel values in the UV plane.
    corner_pix: numpy array
        The image corner pixels, which defined the background.
    center_ft: (optional) bool
        Do we return a Fourier transform with sub-pixel sampling?
    scale: (optional) float
        Zoom factor applied to the image before taking its Fourier transform.

    Returns
    -------
    (a_psf, a_psf_ft):
        Roughly centered image, and precisely centered imaged Fourier transform (unless
        center_ft is False)
    """
    sz = im.shape[0]
    # Rough centering: roll the brightest pixel to the array centre.
    maxpix = np.unravel_index(np.argmax(im), im.shape)
    a_psf = np.roll(np.roll(im, sz//2 - maxpix[0], axis=0), sz//2 - maxpix[1],axis=1)
    # Background level estimated from the corner pixels.
    a_psf -= np.median(a_psf[corner_pix])
    if scale == 1.0:
        mod_ft = np.fft.rfft2(a_psf)[sampled_uv]
    else:
        if scale > 1:
            # NOTE(review): the crop indices use a_psf.shape (== (sz, sz)),
            # not the zoomed array's shape, so this takes the top-left
            # sz x sz corner of the zoomed image rather than a centred
            # crop -- confirm intent against callers.
            scaled_psf = zoom(a_psf, scale)[a_psf.shape[0]//2-sz//2:a_psf.shape[0]//2+sz//2,\
                a_psf.shape[1]//2-sz//2:a_psf.shape[1]//2+sz//2]
        else:
            # NOTE(review): for scale < 1 zoom() shrinks the array, but the
            # target slice below spans the full (sz, sz) extent -- this
            # looks like it would raise a shape mismatch; verify.
            scaled_psf = np.zeros_like(a_psf)
            scaled_psf[sz//2-a_psf.shape[0]//2:sz//2+a_psf.shape[0]//2,\
                sz//2-a_psf.shape[1]//2:sz//2+a_psf.shape[1]//2] = zoom(a_psf, scale)
        mod_ft = np.fft.rfft2(scaled_psf)[sampled_uv]
    if center_ft:
        # Sub-pixel centering: fit a tilt against the flux-normalised reference.
        tilt = optimize_tilt(mod_ft/mod_ft[0], ref_ft/ref_ft[0], uv)
        a_psf_ft = optimize_tilt_function(tilt, mod_ft, ref_ft, uv, return_model=True)
    else:
        a_psf_ft = mod_ft
    return a_psf, a_psf_ft
class Psfs(object):
    """A set of reference PSFs, which creates the space to marginalise over"""

    def __init__(self, psfs=[], psf_files=[], wave=3.5e-6, diam=10.0, pscale=0.01,
                 cubefile=None, cube_extn=1, hyperparams=[], subtract_outer_median=True, scale=1.0):
        """Initialise the reference PSFs. This includes reading them in, cleaning
        and shifting to the origin. Cleaning here includes Fourier filtering, and
        shifting to the origin is done in a least squares sense, i.e. a sub-pixel shift
        that refers PSFs to a master (i.e. mean) PSF.

        Given that the PSFs have limited support in the Fourier domain, we will store them
        as complex Fourier component vectors on this support. Then the process of fitting
        to a linear combination of PSFs is just making a linear combination on this support.

        Note that the (uv) co-ordinates are stored in a way most convenient for the
        tilt_function, going from 0 to 2pi over the full Fourier domain.

        Parameters
        ----------
        wave: float
            Wavelength in m
        diam: float
            Telescope diameter in m
        pscale: float
            Pixel scale in arcsec.
        """
        self.ndim = 0  # 0 until we embed the PSFs in a lower dimensional space!
        self.use_this_psf = 0  # i.e. just use the first PSF until we're told otherwise.
        if cubefile:
            psfs = pyfits.getdata(cubefile, cube_extn)
        else:
            print("Not implemented quite yet...")
            raise UserWarning
        sz = psfs.shape[1]
        self.sz = sz
        self.npsfs = len(psfs)
        # Pixels outside 2/3 of the array radius define the background annulus.
        outer_pix = (1 - ot.circle(sz, 2 * sz / 3)) > 0
        for i in range(len(psfs)):
            if subtract_outer_median:
                psfs[i] -= np.median(psfs[i][outer_pix])
            # Normalise each PSF to unit total flux.
            psfs[i] /= np.sum(psfs[i])
        uv = np.meshgrid(2 * np.pi * np.arange(sz // 2 + 1) / float(sz),
                         2 * np.pi * (((np.arange(sz) + sz // 2) % sz) - sz // 2) / float(sz))
        # A variable that is 2*pi for 1 cycle per pixel.
        rr = np.sqrt(uv[0]**2 + uv[1]**2)
        # Keep only spatial frequencies inside the telescope cutoff.
        sampled_uv = np.where(rr < 2 * np.pi * diam / wave * np.radians(pscale / 3600.))
        # While sampled_uv is an integer array of uv pixel coordinates, uv is an
        # array of Fourier frequency in inverse pixel units
        self.sampled_uv = sampled_uv
        self.uv = np.array([uv[0][sampled_uv], uv[1][sampled_uv]])
        psf_mn = np.sum(psfs, 0) / psfs.shape[0]
        psf_mn_ft = np.fft.rfft2(psf_mn)[sampled_uv]
        psf_fts = []
        # NB This should probably be run twice - once to get a better psf_mn_ft.
        corner_pix = np.where(1 - ot.circle(self.sz, self.sz))
        for i in range(len(psfs)):
            centered_psf, a_psf_ft = prepare_im(psfs[i], psf_mn_ft, self.uv, self.sampled_uv, corner_pix, scale=scale)
            psf_fts.append(a_psf_ft)
        self.psf_fts = np.array(psf_fts)
        # Real/imaginary parts stacked into one real vector per PSF (for LLE).
        self.psf_fts_vect = np.array([np.append(psf_ft.real, psf_ft.imag) for psf_ft in psf_fts])
        self.psf_mn_ft = psf_mn_ft

    def psf_im(self, ix):
        """Helper function to return a point-spread function from a library MTF

        Parameters
        ----------
        ix: int
            Index of the point spread function to return
        """
        if (ix >= len(self.psf_fts)):
            print("ERROR: index out of range")
            raise UserWarning
        else:
            return self.im_from_ft(self.psf_fts[ix])

    def im_from_ft(self, im_ft_sampled):
        """Return a full image based on the subsampled Fourier plane."""
        # Use the builtin ``complex`` dtype: the ``np.complex`` alias was
        # deprecated and removed in NumPy 1.24.
        im_ft = np.zeros((self.sz, self.sz // 2 + 1), dtype=complex)
        im_ft[self.sampled_uv] = im_ft_sampled
        return np.fft.irfft2(im_ft)

    def lle(self, ndim=2, nk=None, length_nsigma=2.0):
        """Embed the PSFs onto an ndim dimensional space

        Parameters
        ----------
        ndim: int
            Number of LLE dimensions
        nk: int
            Number of nearest neighbors to check.
        """
        self.ndim = ndim
        # Default neighbor count scales with the square root of the sample size.
        if not nk:
            nk = int(1.5 * np.sqrt(self.psf_fts_vect.shape[0]))
        self.nk = nk
        self.lle_proj = mdp.nodes.LLENode(nk, output_dim=ndim, verbose=True)(self.psf_fts_vect)
        lengths = np.empty(ndim)
        for d in range(ndim):
            # Robust width estimate from the 16th-84th percentile range.
            one_sigmas = np.percentile(self.lle_proj[:, d], [16, 84])
            lengths[d] = length_nsigma * (one_sigmas[1] - one_sigmas[0])
        print("Axis lengths: " + str(lengths))
        # Kernel bandwidth: mean volume per point, to the 1/ndim power.
        self.h_density = (np.prod(lengths) / self.lle_proj.shape[0])**(1.0 / ndim)
        self.tri = Delaunay(self.lle_proj)
        # Cached squared lengths of the triangulation points (for distances).
        self.point_lengths = np.sum(self.tri.points**2, 1)

    def display_lle_space(self, nsamp=100, return_density=False):
        """Display the space of PSFs as a density in LLE space plus the points
        from which it was constructed.

        Parameters
        ----------
        nsamp: int (optional)
            number of samples in each dimension in the 2D image

        Returns
        -------
        density: numpy array
            The probability density that is plotted, if return_density=True.
        extent: numpy array
            The extent of the plot, if return_density=True.
        """
        if self.ndim != 2:
            print("A 2D display only works for ndim=2")
        sz = self.sz  # Short-hand
        uv = np.meshgrid(2 * np.pi * np.arange(sz // 2 + 1) / float(sz),
                         2 * np.pi * (((np.arange(sz) + sz // 2) % sz) - sz // 2) / float(sz))
        x = np.linspace(-0.5, 0.5, nsamp)
        density = np.empty((nsamp, nsamp))
        xy = np.meshgrid(x, x)
        for i in range(nsamp):
            for j in range(nsamp):
                density[i, j] = self.lle_density([xy[0][i, j], xy[1][i, j]])
        extent = [-0.5, 0.5, -0.5, 0.5]
        # Flip vertically so the image displays with the conventional origin.
        density = density[::-1, :]
        if return_density:
            return density, extent
        else:
            plt.clf()
            plt.imshow(density, extent=extent, cmap=cm.gray)
            plt.plot(self.tri.points[:, 0], self.tri.points[:, 1], '.')
            plt.axis(extent)

    def augment_zernike(self, naugment=3, amps=np.ones(7) * 0.1):
        """Augment the library of reference PSFs by adding zernike's to them
        (neglecting tilt)"""

    def find_lle_psf(self, x, return_image=True):
        """Return the unique interpolated PSF from ndim+1 library PSFs, by
        interpolating within the smallest enclosing simplex where all
        angles. If outside the convex hull, find the nearest edge/faces and
        the simplex that includes one of these and (if possible) extends the furthest
        in the opposite direction. """
        # If we haven't embedded our PSFs into some abstract space, this is simple!
        if self.ndim == 0:
            if return_image:
                return self.psf_im(self.use_this_psf)
            else:
                return self.psf_fts[self.use_this_psf]
        # Otherwise, we have to find nearby LLE co-ordinates (an enclosing simplex)
        # and interpolate between PSFs.
        x = np.array(x)
        enclosing_simplex = self.tri.find_simplex(x)
        if enclosing_simplex < 0:
            # Outside the convex hull: consider simplices touching the
            # nearest vertex and pick the least-bad extrapolation.
            dists = self.point_lengths - 2 * np.dot(self.tri.points, x) + np.sum(x**2)
            nearest = np.argmin(dists)
            possible_simplices = np.where(np.sum(self.tri.simplices == nearest, axis=1))[0]
            # Given a simplex and reference vertex r we can find c such that.
            # T . c = x-v2
            # ... then x = v2 + c0*(v0-v2) + c1*(v1-v2)
            #            = c0*v0 + c1*v1 + (1-c0-c1)*v2
            min_coeffs = []
            for simplex in possible_simplices:
                coeffs = np.dot(self.tri.transform[simplex][:self.ndim, :self.ndim],
                                x - self.tri.transform[simplex][-1])
                coeffs = np.append(coeffs, 1 - np.sum(coeffs))
                min_coeffs.append(np.min(coeffs))
            # The best simplex is the one with the least negative coefficient.
            simplex = possible_simplices[np.argmax(min_coeffs)]
        else:
            simplex = enclosing_simplex
        # Now that we know which simplex to use, get the barycentric
        # coefficients and form the interpolated PSF.
        coeffs = np.dot(self.tri.transform[simplex][:self.ndim, :self.ndim],
                        x - self.tri.transform[simplex][-1])
        coeffs = np.append(coeffs, 1 - np.sum(coeffs))
        interp_psf_ft = np.dot(coeffs, self.psf_fts[self.tri.simplices[simplex]])
        if return_image:
            return self.im_from_ft(interp_psf_ft)
        else:
            return interp_psf_ft

    def trunc_gauss(self, q):
        """ Compute the truncated Gaussian probability density"""
        # Piecewise cubic kernel: one polynomial for q<0.5, another for q<1.
        wl = np.where((q > 0) * (q < 0.5))[0]
        wh = np.where((q > 0.5) * (q < 1))[0]
        the_sum = (np.sum(1 - 6 * q[wl]**2 + 6 * q[wl]**3) +
                   np.sum(2 * (1 - q[wh])**3))
        # Normalisation differs between 2D and 3D embeddings.
        if self.ndim == 2:
            return 40 / np.pi / 7 * the_sum
        else:
            return 8 / np.pi * the_sum

    def lle_density(self, x):
        """Return the local probability density of a given LLE coordinate

        Normalise to a total integral of 1.0 over all LLE parameter space."""
        # Brute force here... KDTree will help as we only need to consider
        # 12-ish nearest neighbors in 2D and 33-ish nearest neighbors in 3D.
        if self.ndim == 0:
            print("Density zero - must compute the LLE first!")
            return 0
        dists = np.array([np.sqrt(np.sum((x - y)**2)) for y in self.lle_proj])
        ww = np.where(dists < 2 * self.h_density)[0]
        if len(ww) == 0:
            return 0.0
        else:
            return self.trunc_gauss(dists[ww] / 2.0 / self.h_density) / len(self.lle_proj) / (2 * self.h_density)**2

    def mcmc_explore(self, niter=30, stepsize=0.5):
        """Explore the space of PSFs."""
        current_pos = self.lle_proj[0]
        current_density = self.lle_density(current_pos)
        print("Computing overall background density for plotting...")
        density, extent = self.display_lle_space(return_density=True)
        for i in range(niter):
            plt.clf()
            plt.subplot(121)
            # Metropolis step: Gaussian proposal scaled by the kernel width.
            jump = np.random.normal(size=self.ndim) * stepsize * self.h_density
            trial = current_pos + jump
            new_density = self.lle_density(trial)
            if new_density / current_density > np.random.random():
                current_density = new_density
                current_pos = trial
            psf = self.find_lle_psf(current_pos, return_image=True)
            plt.imshow(np.arcsinh(psf / np.max(psf) / 0.01), interpolation='nearest', cmap=cm.cubehelix)
            plt.title("lnprob: {0:5.1f}".format(current_density))
            plt.subplot(122)
            plt.imshow(density, extent=extent, cmap=cm.gray)
            plt.plot(self.tri.points[:, 0], self.tri.points[:, 1], 'b.')
            plt.title("Pos: {0:5.2f} {1:5.2f}".format(current_pos[0], current_pos[1]))
            plt.plot(current_pos[0], current_pos[1], 'ro')
            plt.axis(extent)
            plt.draw()
            # !!! Problem: Current matplotlib does not draw here. !!!
            dummy = plt.ginput(1)
        return None

    def hyperparam_prob(self, x, hyperparams=None):
        """Return the hyperparameter probability for a given set of LLE coordinates
        and hyperparameters. Uses the same density kernel as lle_density."""
        return self.lle_density(x)
class PtsrcObject(object):
    """A model for the object on sky, consisting of a single point source.

    Other objects can inherit this. Generally, there will be some fixed
    parameters and some variable parameters. The model parameters are *not*
    imaging parameters, i.e. do not include x, y, flux variables.
    """
    def __init__(self, initp=None):
        """
        Parameters
        ----------
        initp: array-like, optional
            Initial model parameters. Defaults to an empty parameter list.
        """
        # BUGFIX: a mutable default argument (initp=[]) would be shared across
        # all instances; build a fresh empty list per instance instead.
        self.p = [] if initp is None else initp
        self.np = len(self.p)
    def model_uv(self, p_in, uv):
        """Return a model of the Fourier transform of the object given a set of
        points in the uv plane.

        A point source at the origin has a flat (unit) Fourier transform.

        Parameters
        ----------
        p_in: array-like
            model parameters. Can be None if the model has no parameters!
        uv: array-like
            Coordinates in the uv plane.

        Returns
        -------
        numpy array
            Unit visibilities, one per uv point.
        """
        return np.ones(uv.shape[1])
class ModelObject(object):
    """A model for the object on sky, read from an input fits image."""
    def __init__(self, initp=None, infits=''):
        """
        Parameters
        ----------
        initp: array-like, optional
            Unused: The input parameters.
        infits: string
            A filename for an input model fits file.

        Raises
        ------
        ValueError
            If infits is empty or the model image is not square.
        """
        if len(infits)==0:
            raise ValueError("Must set keyword infits to a filename!")
        # BUGFIX: avoid the shared mutable default argument (initp=[]).
        self.p = [] if initp is None else initp
        self.np = len(self.p)
        #Read in the fits file.
        im = pyfits.getdata(infits)
        if im.shape[0] != im.shape[1]:
            raise ValueError("Model Image must be square")
        #Take the Fourier transform and make sure we have coordinate arrays ready
        #for interpolation
        self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im)))
        uv_coord = 2*np.pi*(np.arange(im.shape[0]) - im.shape[0]//2)/float(im.shape[0])
        # Bilinear (kx=ky=1) interpolators for the real and imaginary parts,
        # since RectBivariateSpline does not support complex data directly.
        self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)
        self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)
    def model_uv(self, p_in, uv):
        """Return a model of the Fourier transform of the object given a set of
        points in the uv plane.

        Parameters
        ----------
        p_in: array-like
            model parameters. Can be None if the model has no parameters!
        uv: array-like
            Coordinates in the uv plane.

        Returns
        -------
        complex numpy array
            Interpolated model visibilities at the requested uv points.
        """
        # BUGFIX: np.complex was deprecated and removed in NumPy >= 1.24;
        # the builtin complex is the documented replacement.
        ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(complex)
        ret_array += 1j*self.mod_ft_imagfunc(uv[0], uv[1], grid=False)
        return ret_array
class ResidObject(object):
    """A model for the object on sky, consisting of a point source and a
    map that has been convolved with the PSF map.

    The idea is that, iteratively, the fit residuals can be added to the input
    residuals from the last model, and the final problem is a standard
    deconvolution problem with a known PSF.
    """
    def __init__(self, initp=None, resid_in=None, psf_in=None):
        """
        Parameters
        ----------
        initp: array-like, optional
            A single parameter, the relative flux of the resolved part of the image.
        resid_in: numpy array
            Residuals from the previous iteration. Same size and format as the input
            image, but with N down and E left when displayed with imshow.
        psf_in: numpy array
            The PSF that should be used for the residuals, weighted in the same way.

        Raises
        ------
        ValueError
            If the residual image is not square, or the PSF shape differs.
        """
        # BUGFIX: avoid the shared mutable default argument (initp=[]).
        self.p = [] if initp is None else initp
        self.np = len(self.p)
        #Normalise the input image and PSF to unit sum.
        im = resid_in.copy()
        im /= np.sum(im)
        mean_psf = psf_in.copy()
        mean_psf /= np.sum(mean_psf)
        #Error checking
        if im.shape[0] != im.shape[1]:
            raise ValueError("Model Image must be square")
        if im.shape != mean_psf.shape:
            raise ValueError("PSF must have the same shape as input residuals.")
        #Take the Fourier transform and make sure we have coordinate arrays ready
        #for interpolation. The line below could have a divide by zero - but not where
        #the Fourier transform has non-zero support.
        self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / \
            np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))
        uv_coord = 2*np.pi*(np.arange(im.shape[0]) - im.shape[0]//2)/float(im.shape[0])
        # Bilinear interpolators for real/imaginary parts separately.
        self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)
        self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)
    def model_uv(self, p_in, uv):
        """Return a model of the Fourier transform of the object given a set of
        points in the uv plane.

        The model is a weighted sum of the resolved (deconvolved residual)
        component and an unresolved point source: p*resolved + (1-p).

        Parameters
        ----------
        p_in: array-like
            A single parameter: relative flux of the resolved component.
        uv: array-like
            Coordinates in the uv plane.

        Returns
        -------
        complex numpy array
            Model visibilities at the requested uv points.
        """
        # BUGFIX: np.complex was deprecated and removed in NumPy >= 1.24;
        # the builtin complex is the documented replacement.
        ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(complex)
        ret_array += 1j*self.mod_ft_imagfunc(uv[0], uv[1], grid=False)
        return p_in[0]*ret_array + (1-p_in[0])
class BinaryObject(PtsrcObject):
    """A model with two point sources."""
    def __init__(self, initp=None):
        """
        Parameters
        ----------
        initp: numpy array(3), optional
            North Separation in pix, East separation in pix, contrast secondary/primary
        """
        # BUGFIX: avoid the shared mutable default argument (initp=[]); pass a
        # fresh list so the base class stores per-instance parameters.
        super(BinaryObject, self).__init__([] if initp is None else initp)
    def model_uv(self, p_in, uv):
        """Return a model of the Fourier transform of a binary given a set of points
        in the uv plane.

        Parameters
        ----------
        p_in: array-like
            North Separation in pix, East Separation in pix, Contrast secondary/primary
        uv: array-like
            Coordinates in the uv plane

        Returns
        -------
        complex numpy array
            Model visibilities, normalised to unity at zero spatial frequency.
        """
        #The Fourier transform of 2 delta functions is the sum of the Fourier transform
        #of each delta function. Lets make the primary star be at co-ordinate (0,0)
        ft_object = np.ones(uv.shape[1]) + p_in[2]*np.exp(1j*(p_in[0]*uv[0] + p_in[1]*uv[1]))
        # Normalise so the total flux (zero-frequency visibility) is 1.
        ft_object /= 1+p_in[2]
        return ft_object
class Target(object):
    """A set of target images, to be fit with an object model convolved with
    PSFs drawn from a Psfs library."""
    def __init__(self,psfs,object_model,ims=[], im_files=[],hyperparams=[],
        cubefile=None, cube_extn=0, pas_extn=2, pas=[], gain=4.0):
        """Initialise the target images. This includes reading them in, cleaning,
        shifting to the origin, normalising, creating variance arrays, and chopping
        out the uv component.

        Parameters
        ----------
        psfs: Psfs instance
            The PSF library to go with the target
        object_model: PtsrcObject instance
            The object model
        ims: float numpy array
            A set of target images. NB: these are re-centered in place.
        cubefile: string, optional
            Fits file with an image cube (cube_extn) and position angles
            (pas_extn). Currently the only implemented input path.
        gain: float
            Detector gain, used for the photon-noise term of the variance.

        Notes
        -----
        The input gain should come from pynrm!!! (or better: be set to 1 by scaling)
        """
        self.psfs = psfs
        self.object = object_model
        if cubefile:
            ims = pyfits.getdata(cubefile,cube_extn)
            self.pas = pyfits.getdata(cubefile,pas_extn)['pa']
        else:
            print("Not implemented quite yet...")
            raise UserWarning
        self.n_ims = len(ims)
        self.sz = ims.shape[1]
        if self.sz != self.psfs.sz:
            print("Error: PSFs and target images must be the same size")
            raise UserWarning
        # Rotate the library uv coordinates by each frame's position angle so that
        # the object model is evaluated in the sky frame.
        self.tgt_uv = np.empty( (self.n_ims, self.psfs.uv.shape[0], self.psfs.uv.shape[1]) )
        #!!! NB Sign of rotation not checked below !!!
        for i in range(self.n_ims):
            self.tgt_uv[i,0] = self.psfs.uv[0]*np.cos(np.radians(self.pas[i])) + \
                self.psfs.uv[1]*np.sin(np.radians(self.pas[i]))
            self.tgt_uv[i,1] = self.psfs.uv[1]*np.cos(np.radians(self.pas[i])) - \
                self.psfs.uv[0]*np.sin(np.radians(self.pas[i]))
        # Pixels outside the inscribed circle: used to estimate read/background noise.
        self.corner_pix = np.where(1 - ot.circle(self.sz, self.sz))
        self.read_var = 0.0
        #Assumption: all target images have the same readout and/or background
        #variance !!!
        #!!! Bad pixels have to be added in separately here !!!
        #!!! There should be read noise and variance from pynrm !!!
        im_fts = []
        for i in range(len(ims)):
            centered_im, a_psf_ft = prepare_im(ims[i], self.psfs.psf_mn_ft, \
                self.psfs.uv, self.psfs.sampled_uv, self.corner_pix, center_ft = False)
            ims[i] = centered_im
            self.read_var += np.var(ims[i][self.corner_pix])
            im_fts.append(a_psf_ft)
        self.ims = ims
        self.im_fts = np.array(im_fts)
        self.read_var /= len(ims)
        # Inverse variance: read/background noise plus Poisson noise.
        self.ivar = 1.0/(self.read_var + ims/gain)
    def lnprob(self, x, tgt_use=[], return_mod_ims=False, return_mod_psfs=False):
        """Compute the log probability of a model.

        Parameters
        ----------
        x: numpy array
            The input LLE coordinates, followed by the model parameters, in the order:
            [lle[0],lle[1],...,lle[len(tgt_use)-1],p_in[0],...,p_in[n_params-1]], where
            each of lle[0] etc is a list of length psfs.ndim.
        tgt_use: numpy int array (optional)
            The list of target PSFs to fit to, in case we don't want to fit to all of them.
        return_mod_ims: bool (optional)
            Optionally return the model images rather than the log probability.
        return_mod_psfs: bool (optional)
            Optionally return the model PSFs rather than the log probability.
        """
        x = np.array(x)
        #If tgt_use is not given, use all targets.
        if len(tgt_use)==0:
            tgt_use = np.arange(self.n_ims)
        if self.psfs.ndim > 0:
            x_lle = x[:self.psfs.ndim * len(tgt_use)].reshape( (len(tgt_use), self.psfs.ndim) )
        else:
            x_lle=[]
        # BUGFIX: the model parameters start after ndim coordinates per *used*
        # target. Previously this sliced at ndim*npsfs (the PSF library size),
        # which returns an empty parameter vector whenever npsfs > len(tgt_use).
        x_p = x[self.psfs.ndim * len(tgt_use):]
        prior_prob=1.0
        chi2 = 0.0
        mod_ims = []
        mod_psfs = []
        #Loop through the images and add to the chi-squared
        for i in range(len(tgt_use)):
            prior_prob *= self.psfs.hyperparam_prob(x_lle[i])
            #What is our object model?
            obj_ft = self.object.model_uv(x_p, self.tgt_uv[tgt_use[i]])
            #Convolve the object with the PSF model to form an image model.
            psf_ft = self.psfs.find_lle_psf(x_lle[i], return_image=False)
            if return_mod_psfs:
                mod_psfs.append(self.psfs.im_from_ft(psf_ft))
            mod_ft = obj_ft * psf_ft
            #Find the tilt that best matches the image model, and form an image model,
            #and scale the image to match the total flux.
            scale_factor = self.im_fts[tgt_use[i]][0].real/mod_ft[0].real
            tilt = optimize_tilt(mod_ft, self.im_fts[tgt_use[i]]/scale_factor, self.psfs.uv)
            # BUGFIX: compare against the FT of the *selected* target (was
            # self.im_fts[i], which is wrong whenever tgt_use != range(n_ims)).
            mod_ft = optimize_tilt_function(tilt, mod_ft*scale_factor, self.im_fts[tgt_use[i]], self.psfs.uv, return_model=True)
            mod_im = self.psfs.im_from_ft(mod_ft)
            #Do we want to return the image?
            if return_mod_ims:
                mod_ims.append(mod_im)
            #Compute chi-squared
            chi2 += np.sum((mod_im - self.ims[tgt_use[i]])**2*self.ivar[tgt_use[i]])
        #Returning multiple things is a little messy, but it saves code duplication, or
        #un-necessary computation.
        if return_mod_ims and return_mod_psfs:
            return np.array(mod_ims), np.array(mod_psfs)
        elif return_mod_ims:
            return np.array(mod_ims)
        elif return_mod_psfs:
            return np.array(mod_psfs)
        if prior_prob==0:
            return -np.inf
        else:
            return np.log(prior_prob) - chi2/2.0
    def lle_simplex_interp(self,x):
        """Placeholder — not implemented. !!!What is this ???"""
        return None
    def marginalise(self,init_par=[],walker_sdev=[],nchain=1000, use_threads=True, start_one_at_a_time=True):
        """Use the affine invariant Monte-Carlo Markov chain technique to marginalise
        over all PSFs. We cheat a little by not marginalising over the model parameters
        simultaneously - the parameters are expected to have Gaussian errors
        that come out of a least squares process that fits to PSFs from a point source fit
        (at least this is what I think I meant).

        WARNING: This doesn't actually marginalise over the model parameters yet, it only
        marginalises over the LLE parameters for the point source model.

        Parameters
        ----------
        init_par: array-like
            Initial model parameters (one per walker dimension beyond the LLE part).
        walker_sdev: array-like
            Standard deviations used to scatter the walkers around init_par.
        nchain: int
            Length of each MCMC chain.
        use_threads: bool
            Pass a thread pool to emcee.
        start_one_at_a_time: bool
            First optimise each image independently to seed the full fit.

        Returns
        -------
        best_x, sampler
            The highest-probability parameter vector and the emcee sampler.
        """
        if len(init_par) != len(walker_sdev):
            raise UserWarning("Require same number of parameters (init_par) as walker standard deviations (walker_sdev)!")
        threads = multiprocessing.cpu_count()
        if start_one_at_a_time:
            #Try optimising one image at at time... (no parameters)
            ndim = self.psfs.ndim
            #Make an even number of walkers.
            nwalkers = (3*ndim//2)*2
            #Initialise the chain to random psfs.
            p0 = np.empty( (nwalkers, ndim) )
            init_lle_par = []
            for i in range(nwalkers):
                p0[i,:] = self.psfs.tri.points[int(np.random.random()*self.psfs.npsfs)]
            for j in range(self.n_ims):
                kwargs = {"tgt_use":[j]}
                if use_threads:
                    sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob, threads=threads, kwargs=kwargs)
                else:
                    sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob, kwargs=kwargs)
                sampler.run_mcmc(p0,nchain)
                init_lle_par.append(sampler.flatchain[np.argmax(sampler.flatlnprobability)])
                print("Done initial model for chain {0:d}".format(j))
            init_lle_par = np.array(init_lle_par).flatten()
        # BUGFIX: the full fit must use all images. Previously kwargs leaked from
        # the per-image loop above (so the full fit only ever saw the last image),
        # and was undefined (NameError) when start_one_at_a_time=False.
        kwargs = {}
        #Minimum number of walkers
        n_lle = self.psfs.ndim * self.n_ims
        ndim = n_lle + len(init_par)
        nwalkers = 2*ndim
        if use_threads:
            sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob, threads=threads, kwargs=kwargs)
        else:
            sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob, kwargs=kwargs)
        #Initialise the chain to random psfs.
        p0 = np.empty( (nwalkers, ndim) )
        if start_one_at_a_time:
            for i in range(nwalkers):
                # BUGFIX: the perturbation must match the LLE slice length
                # (previously size=ndim, which includes len(init_par) extra
                # elements and raises a broadcast error when init_par is non-empty).
                p0[i, :n_lle] = init_lle_par + 0.01*np.random.normal(size=n_lle)*self.psfs.h_density
                #Add in a Gaussian distribution of model parameters.
                p0[i, n_lle:] = init_par + np.random.normal(size=len(init_par))*walker_sdev
        else:
            for i in range(nwalkers):
                for j in range(self.n_ims):
                    p0[i,j*self.psfs.ndim:(j+1)*self.psfs.ndim] = \
                        self.psfs.tri.points[int(np.random.random()*self.psfs.npsfs)]
                #Add in a Gaussian distribution of model parameters.
                p0[i, n_lle:] = init_par + np.random.normal(size=len(init_par))*walker_sdev
        sampler.run_mcmc(p0,nchain)
        print("Best lnprob: {0:5.2f}".format(np.max(sampler.lnprobability)))
        best_x = sampler.flatchain[np.argmax(sampler.flatlnprobability)]
        return best_x, sampler
    #Uncomment the following line for FunnelWeb line_profile.
    #kernprof -l best_psf_binary
    #python -m line_profiler best_psf_binary.lprof
    # @profile
    def find_best_psfs(self, p_fix, return_lnprob=False):
        """Make a simple fit of every target image, for fixed model parameters.

        For each target image, every library PSF is tried in turn and the one
        with the lowest chi-squared is kept.

        Parameters
        ----------
        p_fix: array-like
            Model parameters, that are fixed when finding the best PSF. Note that for a
            point source model, this should be [].
        return_lnprob: bool (optional)
            If True, return -chi2_total/2 instead of the best indices/images
            (used as an emcee log-probability function).

        Returns
        -------
        best_ixs, best_fit_ims (or float if return_lnprob)
            Index of the best library PSF and the best model image per target.
        """
        best_fit_ims = np.empty( (self.n_ims, self.ims[0].shape[0], self.ims[0].shape[1]) )
        # BUGFIX: np.int was deprecated and removed in NumPy >= 1.24; the builtin
        # int is the documented replacement.
        best_ixs = np.empty( self.n_ims, dtype=int )
        chi2_total = 0.0
        for i in range(self.n_ims):
            #What is our object model?
            obj_ft = self.object.model_uv(p_fix, self.tgt_uv[i])
            #Chi-squared
            chi2s = np.empty(self.psfs.npsfs)
            mod_ims = np.empty( (self.psfs.npsfs, self.ims[0].shape[0], self.ims[0].shape[1]) )
            for j in range(self.psfs.npsfs):
                #Convolve the object with the PSF model to form an image model.
                mod_ft = obj_ft * self.psfs.psf_fts[j]
                #Find the tilt that best matches the image model, and form an image model,
                #and scale the image to match the total flux.
                #NB This is copied from lnprob, which is a little messy. They should be
                #consolidated!
                scale_factor = self.im_fts[i][0].real/mod_ft[0].real
                tilt = optimize_tilt(mod_ft, self.im_fts[i]/scale_factor, self.psfs.uv)
                mod_ft = optimize_tilt_function(tilt, mod_ft*scale_factor, self.im_fts[i], self.psfs.uv, return_model=True)
                mod_ims[j] = self.psfs.im_from_ft(mod_ft)
                #Find the mean square uncertainty of this fit
                chi2s[j] = np.sum( (mod_ims[j] - self.ims[i])**2 * self.ivar[i] )
            #Find the best image
            best_ixs[i] = np.argmin(chi2s)
            chi2_total += np.min(chi2s)
            best_fit_ims[i] = mod_ims[best_ixs[i]]
        if return_lnprob:
            return -chi2_total/2.0
        else:
            return best_ixs, best_fit_ims
    def marginalise_best_psf(self, init_par=[],walker_sdev=[],nchain=100, nburnin=50, use_threads=True):
        """Use the affine invariant Monte-Carlo Markov chain technique to marginalise
        over all PSFs.

        This brute force algorithm takes a long time, because for every parameter it fits,
        it runs a monte-carlo chain which requires nwalkers * nchain evaluations of
        find_best_psfs, which in turn requires N_psfs * N_target_frames evaluations of
        optimize_tilt.

        e.g. if running this over a 100 x 100 grid, fitting for 1 parameter with 6 walkers
        and a chain length of 100, 100 psfs and 50 target frames, this is 3 x 10^10
        evaluations of optimize_tilt.

        An alternative to this would be to just add 2 model parameters per target image,
        i.e. the tilt of each image, which e.g. could be 50 parameters for 50 target
        images. The problem with this is that it would then require nwalkers to be
        50 times larger in the case of fitting to only 1 parameter (e.g. contrast).

        Parameters
        ----------
        init_par: array-like
            Initial model parameters.
        walker_sdev: array-like
            Standard deviations used to scatter the walkers around init_par.
        nchain: int
            Length of the production chain.
        nburnin: int
            Length of the discarded burn-in chain.
        use_threads: bool
            Pass a thread pool to emcee.

        Returns
        -------
        best_x, sampler
            The highest-probability parameter vector and the emcee sampler.
        """
        threads = multiprocessing.cpu_count()
        #Minimum number of walkers
        ndim = len(init_par)
        nwalkers = 2*ndim
        kwargs = {"return_lnprob":True}
        if use_threads:
            sampler = emcee.EnsembleSampler(nwalkers, ndim, self.find_best_psfs, threads=threads, kwargs=kwargs)
        else:
            sampler = emcee.EnsembleSampler(nwalkers, ndim, self.find_best_psfs, kwargs=kwargs)
        #Initialise the chain to random parameters.
        p0 = np.empty( (nwalkers, ndim) )
        for i in range(nwalkers):
            p0[i] = init_par + np.random.normal(size=len(init_par))*walker_sdev
        # Time a single evaluation so the user can estimate total runtime.
        import time
        then = time.time()
        best_ixs, best_fit_ims = self.find_best_psfs(p0[0])
        now = time.time()
        print(now-then)
        print("Running Chain... (burn in)")
        pos, prob, state = sampler.run_mcmc(p0,nburnin)
        sampler.reset()
        print("Running Chain... ")
        sampler.run_mcmc(pos,nchain)
        print("Best lnprob: {0:5.2f}".format(np.max(sampler.lnprobability)))
        best_x = sampler.flatchain[np.argmax(sampler.flatlnprobability)]
        return best_x, sampler
|
mikeireland/pynrm
|
psf_marginalise.py
|
Python
|
mit
| 37,909
|
[
"Gaussian"
] |
d09ea34b3a0a4deff98ff0b3d3cb9abd9f630a7e668a03e6a2368a3a03386f3c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""
USAGE: $ python -m gensim.scripts.word2vec2tensor --input <Word2Vec model file> --output <TSV tensor filename prefix> [--binary] <Word2Vec binary flag>
Where:
<Word2Vec model file>: Input Word2Vec model
<TSV tensor filename prefix>: 2D tensor TSV output file name prefix
<Word2Vec binary flag>: Set True if Word2Vec model is binary. Defaults to False.
Output:
The script will create two TSV files. A 2d tensor format file, and a Word Embedding metadata file. Both files will
use the --output file name as prefix.
This script is used to convert the word2vec format to Tensorflow 2D tensor and metadata formats for Embedding Visualization
To use the generated TSV 2D tensor and metadata file in the Projector Visualizer, please
1) Open http://projector.tensorflow.org/.
2) Choose "Load Data" from the left menu.
3) Select "Choose file" in "Load a TSV file of vectors." and choose your local "_tensor.tsv" file
4) Select "Choose file" in "Load a TSV file of metadata." and choose your local "_metadata.tsv" file
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
"""
import os
import sys
import random
import logging
import argparse
import gensim
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
    '''
    Convert a Word2Vec model to a 2D tensor TSV file and a metadata TSV file.

    Args:
        word2vec_model_path (str): word2vec model file path
        tensor_filename (str): filename prefix for the two output files
            (<prefix>_tensor.tsv and <prefix>_metadata.tsv)
        binary (bool): set True to use a binary Word2Vec model, defaults to False
    '''
    model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
    outfiletsv = tensor_filename + '_tensor.tsv'
    outfiletsvmeta = tensor_filename + '_metadata.tsv'
    # BUGFIX: the previous code wrote gensim.utils.to_utf8(word) — i.e. bytes —
    # into text-mode files, which raises TypeError on Python 3. Open the files
    # as UTF-8 text and write plain strings instead (same bytes on disk).
    with open(outfiletsv, 'w+', encoding='utf-8') as file_vector, \
            open(outfiletsvmeta, 'w+', encoding='utf-8') as file_metadata:
        for word in model.index2word:
            file_metadata.write(word + '\n')
            vector_row = '\t'.join(map(str, model[word]))
            file_vector.write(vector_row + '\n')
    # Lazy %-style arguments: the message is only formatted if the level is enabled.
    logger.info("2D tensor file saved to %s", outfiletsv)
    logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)

    def _str2bool(value):
        # BUGFIX: argparse hands over the raw string, and bool("False") is True,
        # so "--binary False" previously enabled binary mode. Parse the common
        # true spellings explicitly; everything else is False.
        return str(value).lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--input", required=True,
        help="Input word2vec model")
    parser.add_argument(
        "-o", "--output", required=True,
        help="Output tensor file name prefix")
    parser.add_argument(
        "-b", "--binary", required=False, type=_str2bool, default=False,
        help="If word2vec model in binary format, set True, else False")
    args = parser.parse_args()
    word2vec2tensor(args.input, args.output, args.binary)
    logger.info("finished running %s", program)
|
duyet-website/api.duyet.net
|
lib/gensim/scripts/word2vec2tensor.py
|
Python
|
mit
| 3,539
|
[
"VisIt"
] |
7e43282163bfa8259a96c58c66a274a0cb3512aa5accb6ab85572df5fac633a4
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.electrostatics
import espressomd.interactions
from espressomd.drude_helpers import DrudeHelpers
@utx.skipIfMissingFeatures(["P3M", "ELECTROSTATICS", "THOLE",
                            "THERMOSTAT_PER_PARTICLE", "MASS"])
class Drude(ut.TestCase):

    def test(self):
        """
        Sets up a BMIM PF6 pair separated in y-direction with fixed cores.
        Adds the Drude particles and related features (intramolecular exclusion
        bonds, Thole screening) via helper functions.
        Calculates the induced dipole moment and the diagonals of the
        polarization tensor and compares against reference results, which where
        reproduced with LAMMPS.
        """

        box_l = 50
        system = espressomd.System(box_l=[box_l, box_l, box_l])
        # Fixed seed so the Langevin noise sequence is reproducible.
        np.random.seed(12)

        # Reference Results, reproduced with LAMMPS
        # Dipole Moments
        ref_mu0_pf6 = [0.00177594, 0.16480996, -0.01605161]
        ref_mu0_c1 = [0.00076652, 0.15238767, 0.00135291]
        ref_mu0_c2 = [-0.00020222, 0.11084197, 0.00135842]
        ref_mu0_c3 = [0.00059177, 0.23949626, -0.05238468]
        ref_mu0_bmim = [0.00115606, 0.5027259, -0.04967335]

        # Polarisation Tensor diagonals
        ref_pol_pf6 = [
            4.5535698335873445, 4.7558611769477697, 4.5546580162000554]
        ref_pol_bmim = [
            13.126868394164262, 14.392582501485913, 16.824150151623762]

        # TIMESTEP
        fs_to_md_time = 1.0e-2
        time_step_fs = 0.5
        dt = time_step_fs * fs_to_md_time

        # COM TEMPERATURE
        # Global thermostat temperature, for com and langevin.
        # LangevinPerParticle temperature is set to 0 for drude and core to
        # properly account for com forces. Like that, langevin thermostat can
        # still be used for non-drude particles
        SI_temperature = 300.0
        gamma_com = 1.0
        kb_kjmol = 0.0083145
        temperature_com = SI_temperature * kb_kjmol

        # COULOMB PREFACTOR (elementary charge)^2 / (4*pi*epsilon_0) in
        # Angstrom * kJ/mol
        coulomb_prefactor = 1.67101e5 * kb_kjmol

        # POLARIZATION
        # polarization = 1.0 (in (Angstrom^3)_CGS)
        # alpha_SI = 4*Pi*eps_0 alpha_CGS;
        # 4*Pi*epsilon_0*Angstrom^3/((elementary charge)^2*Angstrom^2*N_A/kJ)
        conv_pol_CGS_SI = 7.197586e-4
        # alpha = conv_pol_CGS_SI*args.polarization

        # DRUDE/TOTAL MASS
        # lamoureux03 used values 0.1-0.8 g/mol for drude mass
        mass_drude = 0.8
        mass_tot = 100.0
        mass_core = mass_tot - mass_drude
        mass_red_drude = mass_drude * mass_core / mass_tot

        # SPRING CONSTANT DRUDE
        # Used 1000kcal/mol/A^2 from lamoureux03a table 1 p 3031
        k_drude = 4184.0
        # in kJ/mol/A^2
        T_spring = 2.0 * np.pi * np.sqrt(mass_drude / k_drude)
        # T_spring_fs = T_spring/fs_to_md_time
        # Period of free oscillation: T_spring = 2Pi/w; w = sqrt(k_d/m_d)

        # TEMP DRUDE
        # Used T* = 1K from lamoureux03a p 3031 (2) 'Cold drude oscillators
        # regime'
        SI_temperature_drude = 1.0
        temperature_drude = SI_temperature_drude * kb_kjmol

        # GAMMA DRUDE
        # Thermostat relaxation time should be similar to T_spring
        gamma_drude = mass_red_drude / T_spring

        system.cell_system.skin = 0.4
        system.time_step = dt

        # Forcefield
        types = {"PF6": 0, "BMIM_C1": 1, "BMIM_C2": 2, "BMIM_C3": 3,
                 "BMIM_COM": 4, "PF6_D": 5, "BMIM_C1_D": 6, "BMIM_C2_D": 7,
                 "BMIM_C3_D": 8}
        charges = {"PF6": -0.78, "BMIM_C1": 0.4374,
                   "BMIM_C2": 0.1578, "BMIM_C3": 0.1848, "BMIM_COM": 0}
        polarizations = {"PF6": 4.653, "BMIM_C1": 5.693,
                         "BMIM_C2": 2.103, "BMIM_C3": 7.409}
        masses = {"PF6": 144.96, "BMIM_C1": 67.07,
                  "BMIM_C2": 15.04, "BMIM_C3": 57.12, "BMIM_COM": 0}
        masses["BMIM_COM"] = masses["BMIM_C1"] + \
            masses["BMIM_C2"] + masses["BMIM_C3"]

        box_center = 0.5 * np.array(3 * [box_l])
        system.min_global_cut = 3.5

        # Place Particles
        dmol = 5.0

        # anion
        pos_pf6 = box_center + np.array([0, dmol, 0])
        part0 = system.part.add(
            type=types["PF6"], pos=pos_pf6, q=charges["PF6"],
            mass=masses["PF6"], fix=[1, 1, 1])

        # cation
        pos_com = box_center - np.array([0, dmol, 0])
        part2 = system.part.add(id=2, type=types["BMIM_C1"],
                                pos=pos_com + [0, -0.527, 1.365], q=charges["BMIM_C1"],
                                mass=masses["BMIM_C1"], fix=[1, 1, 1])
        part4 = system.part.add(id=4, type=types["BMIM_C2"],
                                pos=pos_com + [0, 1.641, 2.987], q=charges["BMIM_C2"],
                                mass=masses["BMIM_C2"], fix=[1, 1, 1])
        part6 = system.part.add(id=6, type=types["BMIM_C3"],
                                pos=pos_com + [0, 0.187, -2.389], q=charges["BMIM_C3"],
                                mass=masses["BMIM_C3"], fix=[1, 1, 1])

        system.thermostat.set_langevin(
            kT=temperature_com,
            gamma=gamma_com,
            seed=42)

        p3m = espressomd.electrostatics.P3M(prefactor=coulomb_prefactor,
                                            accuracy=1e-4, mesh=3 * [18], cao=5)
        system.actors.add(p3m)

        # Drude related Bonds
        thermalized_dist_bond = espressomd.interactions.ThermalizedBond(
            temp_com=temperature_com, gamma_com=gamma_com, r_cut=1.0,
            temp_distance=temperature_drude, gamma_distance=gamma_drude, seed=123)
        harmonic_bond = espressomd.interactions.HarmonicBond(
            k=k_drude, r_0=0.0, r_cut=1.0)
        system.bonded_inter.add(thermalized_dist_bond)
        system.bonded_inter.add(harmonic_bond)

        dh = DrudeHelpers()

        part1 = dh.add_drude_particle_to_core(
            system, harmonic_bond, thermalized_dist_bond, part0,
            types["PF6_D"], polarizations["PF6"], mass_drude,
            coulomb_prefactor, 2.0)
        part3 = dh.add_drude_particle_to_core(
            system, harmonic_bond, thermalized_dist_bond, part2,
            types["BMIM_C1_D"], polarizations["BMIM_C1"], mass_drude,
            coulomb_prefactor, 2.0)
        part5 = dh.add_drude_particle_to_core(
            system, harmonic_bond, thermalized_dist_bond, part4,
            types["BMIM_C2_D"], polarizations["BMIM_C2"], mass_drude,
            coulomb_prefactor, 2.0)
        part7 = dh.add_drude_particle_to_core(
            system, harmonic_bond, thermalized_dist_bond, part6,
            types["BMIM_C3_D"], polarizations["BMIM_C3"], mass_drude,
            coulomb_prefactor, 2.0)

        # Setup and add Drude-Core SR exclusion bonds
        dh.setup_and_add_drude_exclusion_bonds(system)

        # Setup intramol SR exclusion bonds once
        dh.setup_intramol_exclusion_bonds(
            system,
            [types["BMIM_C1_D"], types["BMIM_C2_D"], types["BMIM_C3_D"]],
            [types["BMIM_C1"], types["BMIM_C2"], types["BMIM_C3"]],
            [charges["BMIM_C1"], charges["BMIM_C2"], charges["BMIM_C3"]])

        # Add bonds per molecule
        dh.add_intramol_exclusion_bonds(
            [part3, part5, part7], [part2, part4, part6])

        # Thole
        dh.add_all_thole(system)

        # Dipole moment of one core/Drude pair: Drude charge times the
        # core-to-Drude separation vector.
        def dipole_moment(core_part, drude_part):
            v = drude_part.pos - core_part.pos
            return drude_part.q * v

        # Equilibrate for 115 steps, then average the dipole moments over
        # 100 single-step samples.
        def measure_dipole_moments():
            dm_pf6 = []
            dm_C1 = []
            dm_C2 = []
            dm_C3 = []

            system.integrator.run(115)

            for _ in range(100):
                system.integrator.run(1)

                dm_pf6.append(dipole_moment(part0, part1))
                dm_C1.append(dipole_moment(part2, part3))
                dm_C2.append(dipole_moment(part4, part5))
                dm_C3.append(dipole_moment(part6, part7))

            dm_pf6_m = np.mean(dm_pf6, axis=0)
            dm_C1_m = np.mean(dm_C1, axis=0)
            dm_C2_m = np.mean(dm_C2, axis=0)
            dm_C3_m = np.mean(dm_C3, axis=0)
            dm_sum_bmim = dm_C1_m + dm_C2_m + dm_C3_m
            res = dm_pf6_m, dm_C1_m, dm_C2_m, dm_C3_m, dm_sum_bmim
            return res

        # Apply a homogeneous electric field as per-particle external forces
        # F = q * E.
        def setElectricField(E):
            E = np.array(E)
            for p in system.part:
                p.ext_force = p.q * E

        # Finite-difference polarizability (change in induced dipole per unit
        # field), converted to CGS Angstrom^3.
        def calc_pol(mu0, muE, E):
            pol = (muE - mu0) / E / conv_pol_CGS_SI
            return pol

        def measure_pol(Es, dim):
            E = [0.0, 0.0, 0.0]
            E[dim] = Es
            setElectricField(E)
            mux_pf6, _, _, _, mux_bmim = measure_dipole_moments()
            return calc_pol(mu0_pf6[dim], mux_pf6[dim], Es), calc_pol(
                mu0_bmim[dim], mux_bmim[dim], Es)

        mu0_pf6, mu0_c1, mu0_c2, mu0_c3, mu0_bmim = measure_dipole_moments()

        # Conversion from e*Angstrom to Debye.
        eA_to_Debye = 4.8032047
        atol = 1e-2
        rtol = 1e-2

        np.testing.assert_allclose(
            ref_mu0_pf6, eA_to_Debye * mu0_pf6, atol=atol, rtol=rtol)
        np.testing.assert_allclose(
            ref_mu0_c1, eA_to_Debye * mu0_c1, atol=atol, rtol=rtol)
        np.testing.assert_allclose(
            ref_mu0_c2, eA_to_Debye * mu0_c2, atol=atol, rtol=rtol)
        np.testing.assert_allclose(
            ref_mu0_c3, eA_to_Debye * mu0_c3, atol=atol, rtol=rtol)
        np.testing.assert_allclose(
            ref_mu0_bmim, eA_to_Debye * mu0_bmim, atol=atol, rtol=rtol)

        pol_pf6 = []
        pol_bmim = []

        # E = 1 V/A in kJ / (Avogadro Number) / Angstrom / elementary charge
        Efield = 96.48536
        res = measure_pol(Efield, 0)
        pol_pf6.append(res[0])
        pol_bmim.append(res[1])
        res = measure_pol(Efield, 1)
        pol_pf6.append(res[0])
        pol_bmim.append(res[1])
        res = measure_pol(Efield, 2)
        pol_pf6.append(res[0])
        pol_bmim.append(res[1])

        np.testing.assert_allclose(ref_pol_pf6, pol_pf6, atol=atol, rtol=rtol)
        np.testing.assert_allclose(
            ref_pol_bmim,
            pol_bmim,
            atol=atol,
            rtol=rtol)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/python/drude.py
|
Python
|
gpl-3.0
| 11,112
|
[
"Avogadro",
"ESPResSo",
"LAMMPS"
] |
b2365a35738e07fbceb5ab1d6113d2c54d1ff642d0b4fc23af691320d5254705
|
"""
=======================================
Simulate raw data using subject anatomy
=======================================
This example illustrates how to generate source estimates and simulate raw data
using subject anatomy with the :class:`mne.simulation.SourceSimulator` class.
Once the raw data is simulated, generated source estimates are reconstructed
using dynamic statistical parametric mapping (dSPM) inverse operator.
"""
# Author: Ivana Kojcic <ivana.kojcic@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Kostiantyn Maksymenko <kostiantyn.maksymenko@gmail.com>
# Samuel Deslauriers-Gauthier <sam.deslauriers@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
print(__doc__)
# In this example, raw data will be simulated for the sample subject, so its
# information needs to be loaded. This step will download the data if it is not
# already on your machine. Subjects directory is also set so it doesn't need
# to be given to functions.
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
meg_path = op.join(data_path, 'MEG', subject)

# First, we get an info structure from the sample subject.
fname_info = op.join(meg_path, 'sample_audvis_raw.fif')
info = mne.io.read_info(fname_info)
# Sample period (seconds per sample), derived from the sampling frequency.
tstep = 1 / info['sfreq']

# To simulate sources, we also need a source space. It can be obtained from the
# forward solution of the sample subject.
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']

# To simulate raw data, we need to define when the activity occurs using events
# matrix and specify the IDs of each event.
# Noise covariance matrix also needs to be defined.
# Here, both are loaded from the sample dataset, but they can also be specified
# by the user.
fname_event = op.join(meg_path, 'sample_audvis_raw-eve.fif')
fname_cov = op.join(meg_path, 'sample_audvis-cov.fif')

events = mne.read_events(fname_event)
noise_cov = mne.read_cov(fname_cov)

# Standard sample event IDs. These values will correspond to the third column
# in the events matrix.
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
            'visual/right': 4, 'smiley': 5, 'button': 32}

# Take only a few events for speed
events = events[:80]
###############################################################################
# In order to simulate source time courses, labels of desired active regions
# need to be specified for each of the 4 simulation conditions.
# Make a dictionary that maps conditions to activation strengths within
# aparc.a2009s [1]_ labels. In the aparc.a2009s parcellation:
#
# - 'G_temp_sup-G_T_transv' is the label for primary auditory area
# - 'S_calcarine' is the label for primary visual area
#
# In each of the 4 conditions, only the primary area is activated. This means
# that during the activations of auditory areas, there are no activations in
# visual areas and vice versa.
# Moreover, for each condition, contralateral region is more active (here, 2
# times more) than the ipsilateral.

activations = {
    'auditory/left':
        [('G_temp_sup-G_T_transv-lh', 30),          # label, activation (nAm)
         ('G_temp_sup-G_T_transv-rh', 60)],
    'auditory/right':
        [('G_temp_sup-G_T_transv-lh', 60),
         ('G_temp_sup-G_T_transv-rh', 30)],
    'visual/left':
        [('S_calcarine-lh', 30),
         ('S_calcarine-rh', 60)],
    'visual/right':
        [('S_calcarine-lh', 60),
         ('S_calcarine-rh', 30)],
}

annot = 'aparc.a2009s'

# Load the 4 necessary label names (deduplicated and sorted for determinism).
label_names = sorted(set(activation[0]
                         for activation_list in activations.values()
                         for activation in activation_list))
region_names = list(activations.keys())
###############################################################################
# Create simulated source activity
# --------------------------------
#
# Generate source time courses for each region. In this example, we want to
# simulate source activity for a single condition at a time. Therefore, each
# evoked response will be parametrized by latency and duration.
def data_fun(times, latency, duration):
    """Generate one simulated evoked source time course.

    The waveform is a beta-band (15 Hz) sinusoid under a Gaussian envelope
    whose center is jittered using the module-level ``rng``; amplitudes are
    scaled to nAm (1e-9).
    """
    beta_freq = 15  # oscillating frequency, beta band [Hz]
    env_width = 0.375 * duration
    shifted = times - latency
    carrier = np.sin(2 * np.pi * beta_freq * shifted)
    center_jitter = (env_width / 4.) * rng.rand(1)
    envelope = np.exp(-(shifted - center_jitter) ** 2 /
                      (2 * (env_width ** 2)))
    return 1e-9 * carrier * envelope
###############################################################################
# Here, :class:`~mne.simulation.SourceSimulator` is used, which allows to
# specify where (label), what (source_time_series), and when (events) event
# type will occur.
#
# We will add data for 4 areas, each of which contains 2 labels. Since add_data
# method accepts 1 label per call, it will be called 2 times per area.
#
# Evoked responses are generated such that the main component peaks at 100ms
# with a duration of around 30ms, which first appears in the contralateral
# cortex. This is followed by a response in the ipsilateral cortex with a peak
# about 15ms after. The amplitude of the activations will be 2 times higher in
# the contralateral region, as explained before.
#
# When the activity occurs is defined using events. In this case, they are
# taken from the original raw data. The first column is the sample of the
# event, the second is not used. The third one is the event id, which is
# different for each of the 4 areas.

# Use the builtin ``float``: the ``np.float`` alias was deprecated in
# NumPy 1.20 and removed in 1.24 (it was always identical to the builtin).
times = np.arange(150, dtype=float) / info['sfreq']
duration = 0.03
rng = np.random.RandomState(7)
source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
for region_id, region_name in enumerate(region_names, 1):
    events_tmp = events[np.where(events[:, 2] == region_id)[0], :]
    for i in range(2):
        label_name = activations[region_name][i][0]
        label_tmp = mne.read_labels_from_annot(subject, annot,
                                               subjects_dir=subjects_dir,
                                               regexp=label_name,
                                               verbose=False)
        label_tmp = label_tmp[0]
        amplitude_tmp = activations[region_name][i][1]
        # Ipsilateral responses peak ~15 ms later than contralateral ones.
        if region_name.split('/')[1][0] == label_tmp.hemi[0]:
            latency_tmp = 0.115
        else:
            latency_tmp = 0.1
        wf_tmp = data_fun(times, latency_tmp, duration)
        source_simulator.add_data(label_tmp,
                                  amplitude_tmp * wf_tmp,
                                  events_tmp)
# To obtain a SourceEstimate object, we need to use `get_stc()` method of
# SourceSimulator class.
stc_data = source_simulator.get_stc()
###############################################################################
# Simulate raw data
# -----------------
#
# Project the source time series to sensor space. Three types of noise will be
# added to the simulated raw data:
#
# - multivariate Gaussian noise obtained from the noise covariance from the
#   sample data
# - blink (EOG) noise
# - ECG noise
#
# The :class:`~mne.simulation.SourceSimulator` can be given directly to the
# :func:`~mne.simulation.simulate_raw` function.
raw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
raw_sim.set_eeg_reference(projection=True)
# Noise is added in place, with a fixed random_state for reproducibility.
mne.simulation.add_noise(raw_sim, cov=noise_cov, random_state=0)
mne.simulation.add_eog(raw_sim, random_state=0)
mne.simulation.add_ecg(raw_sim, random_state=0)
# Plot original and simulated raw data.
raw_sim.plot(title='Simulated raw data')
###############################################################################
# Extract epochs and compute evoked responses
# --------------------------------------------
#
epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3,
                    baseline=(None, 0))
evoked_aud_left = epochs['auditory/left'].average()
evoked_vis_right = epochs['visual/right'].average()
# Visualize the evoked data
evoked_aud_left.plot(spatial_colors=True)
evoked_vis_right.plot(spatial_colors=True)
###############################################################################
# Reconstruct simulated source time courses using dSPM inverse operator
# ---------------------------------------------------------------------
#
# Here, source time courses for auditory and visual areas are reconstructed
# separately and their difference is shown. This was done merely for better
# visual representation of source reconstruction.
# As expected, when high activations appear in primary auditory areas, primary
# visual areas will have low activations and vice versa.
method, lambda2 = 'dSPM', 1. / 9.
inv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov)
stc_aud = mne.minimum_norm.apply_inverse(
    evoked_aud_left, inv, lambda2, method)
stc_vis = mne.minimum_norm.apply_inverse(
    evoked_vis_right, inv, lambda2, method)
stc_diff = stc_aud - stc_vis
brain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1,
                      hemi='split', views=['lat', 'med'])
###############################################################################
# References
# ----------
# .. [1] Destrieux C, Fischl B, Dale A, Halgren E (2010). Automatic
#        parcellation of human cortical gyri and sulci using standard
#        anatomical nomenclature, vol. 53(1), 1-15, NeuroImage.
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/348fd8e6a5e2100dbbbef2df40123b87/plot_simulated_raw_data_using_subject_anatomy.py
|
Python
|
bsd-3-clause
| 9,580
|
[
"Gaussian"
] |
2c013c030fec859f42b1c480c1d1d066f6b5adb9dbf3d664b2605141be36a721
|
"""Identify program versions used for analysis, reporting in structured table.
Catalogs the full list of programs used in analysis, enabling reproduction of
results and tracking of provenance in output files.
"""
import os
import contextlib
import subprocess
import sys
import yaml
import toolz as tz
from bcbio import utils
from bcbio.pipeline import config_utils, version
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
# Command-line programs whose versions are probed by running them.
# Each entry describes how to invoke the tool ("cmd" plus optional "args")
# and how to parse its output: "stdout_flag" marks the line/prefix preceding
# the version token, "paren_flag" extracts text inside parentheses, and
# "has_cl_version": False means the tool exposes no version on the CLI.
_cl_progs = [{"cmd": "bamtofastq", "name": "biobambam",
              "args": "--version", "stdout_flag": "This is biobambam version"},
             {"cmd": "bamtools", "args": "--version", "stdout_flag": "bamtools"},
             {"cmd": "bcftools", "stdout_flag": "Version:"},
             {"cmd": "bedtools", "args": "--version", "stdout_flag": "bedtools"},
             {"cmd": "bowtie2", "args": "--version", "stdout_flag": "bowtie2-align version"},
             {"cmd": "bwa", "stdout_flag": "Version:"},
             {"cmd": "chanjo"},
             {"cmd": "cufflinks", "stdout_flag": "cufflinks"},
             {"cmd": "cutadapt", "args": "--version"},
             {"cmd": "fastqc", "args": "--version", "stdout_flag": "FastQC"},
             {"cmd": "freebayes", "stdout_flag": "version:"},
             {"cmd": "gemini", "args": "--version", "stdout_flag": "gemini "},
             {"cmd": "novosort", "paren_flag": "novosort"},
             {"cmd": "novoalign", "stdout_flag": "Novoalign"},
             {"cmd": "samtools", "stdout_flag": "Version:"},
             {"cmd": "sambamba", "stdout_flag": "sambamba"},
             {"cmd": "qualimap", "args": "-h", "stdout_flag": "QualiMap"},
             {"cmd": "vcflib", "has_cl_version": False},
             {"cmd": "featurecounts", "args": "-v", "stdout_flag": "featureCounts"}]
def _broad_versioner(type):
    """Build a version-lookup callable for a Broad toolkit program.

    `type` selects which tool the returned function reports on: "gatk",
    "picard" or "mutect". The callable returns "" when the runner cannot
    be configured, and raises NotImplementedError for unknown types.
    """
    def get_version(config):
        from bcbio import broad
        try:
            runner = broad.runner_from_config(config)
        except ValueError:
            return ""
        if type == "gatk":
            return runner.get_gatk_version()
        if type == "picard":
            return runner.get_picard_version("ViewSam")
        if type == "mutect":
            # MuTect needs its own runner configuration
            try:
                mutect_runner = broad.runner_from_config(config, "mutect")
            except ValueError:
                return ""
            return mutect_runner.get_mutect_version()
        raise NotImplementedError(type)
    return get_version
def jar_versioner(program_name, jar_name):
    """Retrieve version information based on jar file.
    """
    def get_version(config):
        try:
            pdir = config_utils.get_program(program_name, config, "dir")
        except ValueError:
            # not configured
            return ""
        jar_path = config_utils.get_jar(jar_name, pdir)
        # Strip the program name and packaging suffixes; what remains of the
        # jar filename is the version string.
        version = os.path.basename(jar_path)
        for noise in [jar_name, ".jar", "-standalone"]:
            version = version.replace(noise, "")
        # drop a single leading separator left over from the base name
        if version[:1] in ("-", "."):
            version = version[1:]
        if not version:
            logger.warn("Unable to determine version for program '{}' from jar file {}".format(
                program_name, config_utils.get_jar(jar_name, pdir)))
        return version
    return get_version
def java_versioner(pname, jar_name, **kwargs):
    """Build a version lookup that runs ``java -jar`` on the program's jar.

    Extra keyword arguments (e.g. stdout_flag) are forwarded to
    _get_cl_version to control output parsing.
    """
    def get_version(config):
        try:
            jar_dir = config_utils.get_program(pname, config, "dir")
        except ValueError:
            return ""
        jar_file = config_utils.get_jar(jar_name, jar_dir)
        kwargs["cmd"] = "java"
        kwargs["args"] = "-Xms128m -Xmx256m -jar %s" % jar_file
        return _get_cl_version(kwargs, config)
    return get_version
# Programs whose versions cannot be read from a simple CLI invocation.
# Each entry pairs a program name with a zero-argument-style "version_fn"
# callable (taking the config) built by jar_versioner, java_versioner or
# _broad_versioner above.
_alt_progs = [{"name": "bcbio_variation",
               "version_fn": jar_versioner("bcbio_variation", "bcbio.variation")},
              {"name": "gatk", "version_fn": _broad_versioner("gatk")},
              {"name": "mutect",
               "version_fn": _broad_versioner("mutect")},
              {"name": "picard", "version_fn": _broad_versioner("picard")},
              {"name": "rnaseqc",
               "version_fn": jar_versioner("rnaseqc", "RNA-SeQC")},
              {"name": "snpeff",
               "version_fn": java_versioner("snpeff", "snpEff", stdout_flag="snpEff version SnpEff")},
              {"name": "varscan",
               "version_fn": jar_versioner("varscan", "VarScan")},
              {"name": "oncofuse",
               "version_fn": jar_versioner("Oncofuse", "Oncofuse")},
              {"name": "alientrimmer",
               "version_fn": jar_versioner("AlienTrimmer", "AlienTrimmer")}
              ]
def _parse_from_stdoutflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
parts = [p for p in line[line.find(x) + len(x):].split() if p.strip()]
return parts[0].strip()
return ""
def _parse_from_parenflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
return line.split("(")[-1].split(")")[0]
return ""
def _get_cl_version(p, config):
    """Retrieve version of a single commandline program.

    `p` is a program description dict (see _cl_progs): "cmd" names the
    executable, optional "args" are extra CLI arguments, and
    "stdout_flag"/"paren_flag" select how the output is parsed. Returns ""
    when the program declares no CLI version or cannot be located.
    """
    if not p.get("has_cl_version", True):
        return ""
    try:
        prog = config_utils.get_program(p["cmd"], config)
    except config_utils.CmdNotFound:
        # fall back to a tool installed next to the current Python executable
        localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"])
        if os.path.exists(localpy_cmd):
            prog = localpy_cmd
        else:
            return ""
    args = p.get("args", "")
    cmd = "{prog} {args}"
    # stderr is folded into stdout since many tools print versions there
    subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            shell=True)
    # NOTE(review): reads the pipe as text lines; under Python 3 the pipe
    # yields bytes, so this path assumes Python 2 semantics -- confirm.
    with contextlib.closing(subp.stdout) as stdout:
        if p.get("stdout_flag"):
            v = _parse_from_stdoutflag(stdout, p["stdout_flag"])
        elif p.get("paren_flag"):
            v = _parse_from_parenflag(stdout, p["paren_flag"])
        else:
            # no parse hint: assume the last non-empty line is the version
            lines = [l.strip() for l in stdout.read().split("\n") if l.strip()]
            v = lines[-1]
    if v.endswith("."):
        v = v[:-1]
    return v
def _get_brew_versions():
    """Retrieve versions of tools installed via brew.

    Runs ``brew list --versions`` (preferring the bcbio tooldir copy when
    configured) and returns a dict of {package_name: version}. Returns an
    empty dict when brew is not installed or exits with an error -- the
    previous implementation only caught OSError, so an installed-but-failing
    brew raised CalledProcessError and aborted version collection.
    """
    from bcbio import install
    tooldir = install.get_defaults().get("tooldir")
    brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew"
    try:
        vout = subprocess.check_output([brew_cmd, "list", "--versions"])
    except (OSError, subprocess.CalledProcessError):  # brew not installed or errored
        vout = ""
    out = {}
    for vstr in vout.split("\n"):
        if vstr.strip():
            parts = vstr.rstrip().split()
            name = parts[0]
            # the last token is the most recently installed version
            v = parts[-1]
            out[name] = v
    return out
def _get_versions(config=None):
    """Retrieve details on all programs available on the system.

    Returns a list of {"program": ..., "version": ...} dicts, always starting
    with bcbio-nextgen itself. Prefers a pre-built install manifest; when none
    exists, falls back to probing brew and the command line (which requires a
    configuration).
    """
    out = [{"program": "bcbio-nextgen",
            "version": ("%s-%s" % (version.__version__, version.__git_revision__)
                        if version.__git_revision__ else version.__version__)}]
    manifest_dir = _get_manifest_dir(config)
    manifest_vs = _get_versions_manifest(manifest_dir)
    if manifest_vs:
        return out + manifest_vs
    else:
        assert config is not None, "Need configuration to retrieve from non-manifest installs"
        brew_vs = _get_brew_versions()
        # brew-recorded versions win over probing the tools directly
        for p in _cl_progs:
            out.append({"program": p["cmd"],
                        "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else
                                    _get_cl_version(p, config))})
        for p in _alt_progs:
            out.append({"program": p["name"],
                        "version": (brew_vs[p["name"]] if p["name"] in brew_vs else
                                    p["version_fn"](config))})
        return out
def _get_manifest_dir(data=None):
    """
    Get the manifest directory from the data dictionary, falling back on
    alternatives. It prefers, in order:
    1. locating it from the bcbio_system.yaml file
    2. locating it from the galaxy directory
    3. locating it from the python executable.
    It can accept either the data or config dictionary.
    """
    manifest_dir = None
    if data:
        bcbio_system = tz.get_in(["config", "bcbio_system"], data, None)
        bcbio_system = bcbio_system if bcbio_system else data.get("bcbio_system", None)
        if bcbio_system:
            sibling_dir = os.path.normpath(os.path.dirname(bcbio_system))
        else:
            sibling_dir = dd.get_galaxy_dir(data)
        if sibling_dir:
            # manifest lives next to the sibling directory, one level up
            manifest_dir = os.path.normpath(os.path.join(sibling_dir, os.pardir,
                                                         "manifest"))
    if not manifest_dir or not os.path.exists(manifest_dir):
        # final fallback: relative to the installed python executable
        manifest_dir = os.path.join(config_utils.get_base_installdir(), "manifest")
    return manifest_dir
def _get_versions_manifest(manifest_dir):
    """Retrieve versions from a pre-existing manifest of installed software.

    Reads the per-category ``*-packages.yaml`` files in `manifest_dir` and
    returns a sorted list of {"program": ..., "version": ...} dicts, with ""
    for expected packages not present in any manifest. Returns None
    (implicitly) when `manifest_dir` does not exist; callers treat a falsy
    result as "no manifest".
    """
    all_pkgs = ["htseq", "cn.mops", "vt", "platypus-variant", "gatk-framework", "samblaster"] + \
               [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs]
    if os.path.exists(manifest_dir):
        out = []
        for plist in ["toolplus", "brew", "python", "r", "debian", "custom"]:
            pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist)
            if os.path.exists(pkg_file):
                with open(pkg_file) as in_handle:
                    pkg_info = yaml.safe_load(in_handle)
                added = []
                for pkg in all_pkgs:
                    if pkg in pkg_info:
                        added.append(pkg)
                        out.append({"program": pkg, "version": pkg_info[pkg]["version"]})
                # each package is reported from the first manifest that has it
                for x in added:
                    all_pkgs.remove(x)
        out.sort(key=lambda x: x["program"])
        for pkg in all_pkgs:
            out.append({"program": pkg, "version": ""})
        return out
def _get_program_file(dirs):
if dirs.get("work"):
base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance"))
return os.path.join(base_dir, "programs.txt")
def write_versions(dirs, config=None, is_wrapper=False):
    """Write CSV file with versions used in analysis pipeline.

    Writes ``program,version`` lines to workdir/provenance/programs.txt when a
    work directory is available, otherwise prints them to stdout. With
    `is_wrapper`, the file is expected to have been produced already (e.g.
    inside a VM) and is only validated. Returns the output path (or None when
    printing to stdout).
    """
    out_file = _get_program_file(dirs)
    if is_wrapper:
        assert utils.file_exists(out_file), "Failed to create program versions from VM"
    elif out_file is None:
        for p in _get_versions(config):
            print("{program},{version}".format(**p))
    else:
        with open(out_file, "w") as out_handle:
            for p in _get_versions(config):
                out_handle.write("{program},{version}\n".format(**p))
    return out_file
def get_version_manifest(name, data=None, required=False):
    """Retrieve a version from the currently installed manifest.
    """
    manifest = _get_versions_manifest(_get_manifest_dir(data))
    for entry in manifest:
        if entry["program"] != name:
            continue
        version = entry.get("version", "")
        if version:
            return version
    if required:
        raise ValueError("Did not find %s in install manifest. Could not check version." % name)
    return ""
def add_subparser(subparsers):
    """Register the `version` subcommand for exporting software versions.
    """
    version_parser = subparsers.add_parser(
        "version",
        help="Export versions of used software to stdout or a file ")
    version_parser.add_argument(
        "--workdir", default=None,
        help="Directory export programs to in workdir/provenance/programs.txt")
def get_version(name, dirs=None, config=None):
    """Retrieve the current version of the given program from cached names.

    Looks up the programs CSV (derived from the work dirs when given,
    otherwise from the configured resources path). Raises KeyError when the
    program has no recorded version.
    """
    if dirs:
        version_file = _get_program_file(dirs)
    else:
        version_file = config["resources"]["program_versions"]
    with open(version_file) as in_handle:
        for entry in in_handle:
            prog, recorded = entry.rstrip().split(",")
            if prog == name and recorded:
                return recorded
    raise KeyError("Version information not found for %s in %s" % (name, version_file))
|
fw1121/bcbio-nextgen
|
bcbio/provenance/programs.py
|
Python
|
mit
| 12,080
|
[
"BWA",
"Galaxy",
"HTSeq"
] |
796e276f44072da64cd1ae22fd1d617db4827f0fb2201ae972b7c24292f8bac3
|
# -*- coding: utf-8 -*-
from osv import fields
from datetime import date,datetime,time
import logging
_logger = logging.getLogger(__name__)
# Time-slot choices for selection fields.
def time_for_selection(self,cr,uid,context = None):
    """Build the sorted list of half-hour time-slot selection tuples.

    NOTE(review): each "HH:00" value is labelled "HH时30分" (HH:30) and each
    "HH:30" value is labelled "(HH+1)时00分" -- the label appears to describe
    the *end* of the half-hour slot rather than its value; confirm intended.
    The final slot is replaced by ("23:59", "23时59分") to stay within the day.
    """
    ret = [("%02i:00" % i,"%02i时30分" % i) for i in range(24)] + [("%02i:30" % i,"%02i时00分" % (i+1)) for i in range(24)]
    ret.sort()
    ret.pop()
    ret.append(("23:59","23时59分"))
    return ret
# Price-category choices for selection fields (labels are Chinese).
def price_list_for_selection(self,cr,uid,context = None):
    """Return the selection tuples for the available price categories."""
    return [
        ("ting_price", "大厅价"),
        ("room_price", "包厢价"),
        ("member_price", "会员价"),
        ("vip_price", "贵宾价"),
        ("a_price", "A类价"),
        ("b_price", "B类价"),
    ]
# Room-state choices for selection fields (labels are Chinese).
def room_states_for_selection(self,cr,uid,context = None):
    """Return the selection tuples describing possible room states."""
    return [
        ("free", "空闲"),
        ("in_use", "使用"),
        ("scheduled", "预定"),
        ("locked", "锁定"),
        ("checkout", "已结账"),
        ("buyout", "买断"),
        ("buytime", "买钟"),
        ("malfunction", "故障"),
        ("clean", "清洁"),
        ("debug", "调试"),
        ("visit", "带客"),
    ]
# Gender choices for selection fields.
def sexes_for_select(self,cr,uid,context = None):
    """Return the selection tuples for gender (F/M)."""
    return [("F", "女"), ("M", "男")]
# Identity-document type choices for selection fields.
def id_types_for_select(self,cr,uid,context = None):
    """Return the selection tuples for identity document types."""
    return [(1, "身份证"), (2, "驾驶证"), (3, "其他证件")]
# Map a weekday index (0 = Monday ... 6 = Sunday) to its abbreviation.
def weekday_str(weekday_int):
    """Return the three-letter weekday abbreviation for 0 (mon) .. 6 (sun)."""
    abbreviations = {
        0: 'mon',
        1: 'tue',
        2: 'wed',
        3: 'thu',
        4: 'fri',
        5: 'sat',
        6: 'sun',
    }
    return abbreviations[weekday_int]
def current_user_tz(obj,cr,uid,context = None):
    """
    Get the timezone setting of the currently logged-in user.
    :param cursor cr database cursor
    :params integer uid id of the currently logged-in user
    :return the user's context_tz value
    """
    # read the user's record through the ORM pool
    the_user = obj.pool.get('res.users').read(cr,uid,uid,['id','context_tz','name'])
    return the_user['context_tz']
def user_context_now(obj,cr,uid):
    """
    Get the local date/time of the currently logged-in user.
    :return the localized current datetime
    """
    tz = current_user_tz(obj,cr,uid)
    # localize "now" to the user's configured timezone
    context_now = fields.datetime.context_timestamp(cr,uid,datetime.now(),{"tz" : tz})
    return context_now
def minutes_delta(time_from,time_to):
    '''
    Number of minutes between two given times.
    :param time_from string start time of the form '09:30'
    :param time_to string end time of the form '09:30'
    :return integer minute difference (time_to - time_from; may be negative)
    '''
    from_parts = [int(piece) for piece in time_from.split(':')]
    to_parts = [int(piece) for piece in time_to.split(':')]
    start = time(from_parts[0], from_parts[1])
    end = time(to_parts[0], to_parts[1])
    return (end.hour - start.hour) * 60 + (end.minute - start.minute)
def context_now_minutes_delta(obj,cr,uid,time_to):
    '''
    Number of minutes from the current time to the given time, computed in
    the timezone of the currently logged-in user.
    :param object obj osv object
    :param cursor cr database cursor
    :param integer uid currently logged-in user
    :param string time_to target time of the form '09:30'
    :return integer minute difference between now and time_to
    '''
    context_now = user_context_now(obj,cr,uid)
    return minutes_delta(context_now.strftime("%H:%M"),time_to)
def context_strptime(osv_obj,cr,uid,str_time):
    '''
    Convert the given time string to a datetime on today's date, using the
    timezone of the currently logged-in user.
    :param osv_obj osv database object
    :param cr db cursor
    :param int uid currently logged-in user
    :param str_time time string of the form '09:30'
    :return datetime the computed datetime object
    '''
    context_now = user_context_now(osv_obj,cr,uid)
    time_array = [int(a) for a in str_time.split(":")]
    # keep today's date, replace only hour and minute
    ret = context_now.replace(hour=time_array[0],minute=time_array[1])
    return ret
def str_to_today_time(time):
    '''
    Convert the given string into a datetime on today's date.
    :params time a time string of the form 09:30:00
    :return datetime with today's date and the given hour/minute/second
    '''
    pieces = [int(piece) for piece in time.split(':')]
    return datetime.now().replace(hour=pieces[0], minute=pieces[1], second=pieces[2])
def utc_time_between(str_time_from,str_time_to,str_cur_time):
    """
    Check whether the given time string falls within a time interval.
    Since times are stored uniformly as UTC strings, the interval may wrap
    past midnight (time_to < time_from).
    :params string str_time_from interval start, e.g. '09:10:00'
    :params string str_time_to interval end, e.g. '09:10:00'
    :params str_cur_time the time string to compare
    :return True when inside the interval, else False
    """
    if str_time_to > str_time_from:
        return str_time_from <= str_cur_time <= str_time_to
    # time_from > time_to means the interval wraps past midnight.
    # Fix: the day-end bound is now inclusive; the previous comparison used
    # "< '23:59:59'" and wrongly excluded the final second of the day.
    return ((str_time_from <= str_cur_time <= '23:59:59') or
            ('00:00:00' <= str_cur_time <= str_time_to))
def calculate_present_minutes(buy_minutes,promotion_buy_minutes = 0,promotion_present_minutes = 0):
    """
    Compute the bonus (gift) minutes for a minute purchase:
    purchased minutes / configured purchase length * configured bonus length.
    :params buy_minutes integer purchased minutes
    :params promotion_buy_minutes integer purchase length set in the promotion
    :params promotion_present_minutes integer bonus length set in the promotion
    :return integer bonus minutes (the purchased minutes themselves when no
            promotion is configured)
    """
    if not promotion_buy_minutes:
        # no promotion configured: return the purchased minutes unchanged
        return buy_minutes
    return buy_minutes / promotion_buy_minutes * promotion_present_minutes
|
chengdh/openerp-ktv
|
openerp/addons/ktv_sale/ktv_helper.py
|
Python
|
agpl-3.0
| 5,772
|
[
"VisIt"
] |
fa7392b32c257735cd583f1313bf9825f49d5647734173ea0ba506ec16ad39e9
|
"""
@name: C:/Users/briank/workspace/PyHouse/src/Modules.Core.Utilities._test/test_obj_defs.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 7, 2015
@Summary:
Passed all 3 tests - DBK - 2016-11-22
"""
__updated__ = '2016-11-22'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files
from Modules.Core.Utilities.obj_defs import GetPyhouse
from test.xml_data import XML_LONG
from test.testing_mixin import SetupPyHouseObj
class SetupMixin(object):
    """Shared fixture: builds the PyHouse object and XML tree used by tests.
    """
    def setUp(self, p_root):
        # p_root is the parsed XML root element fed to both builders
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class A0(unittest.TestCase):
    # Smoke test that only identifies this test module in the run output.
    def setUp(self):
        pass
    def test_00_Print(self):
        print('Id: test_obj_defs')
class D1_GetPyHouse(SetupMixin, unittest.TestCase):
    """Test GetPyhouse class functionality
    """
    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))
    def test_01_House(self):
        # NOTE(review): assertions are commented out, so this currently only
        # verifies that House() can be called without raising.
        l_pyh = GetPyhouse(self.m_pyhouse_obj).House()
        # self.assertEqual(l_pyh.Name, 'Test House')
        # self.assertEqual(l_pyh.Key, 0)
        # self.assertEqual(l_pyh.Active, True)
    def test_01_Schedules(self):
        # Same: only verifies Schedules() can be called without raising.
        l_pyh = GetPyhouse(self.m_pyhouse_obj).Schedules()
        # self.assertEqual(l_pyh.Schedules, {})
        pass
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Utilities/_test/test_obj_defs.py
|
Python
|
mit
| 1,513
|
[
"Brian"
] |
2a0b3c78072276370967d136f751f3824234c8bf3e6e2aad0a36d26cfd0b0636
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from scipy.ndimage import correlate1d, _ni_support, gaussian_filter, \
binary_erosion
from scipy import math
def square_gaussian_filter1d(input, sigma, axis = -1, output = None, mode = "reflect", cval = 0.0):
    """One-dimensional Squared Gaussian filter.

    Correlates `input` along `axis` with the elementwise square of a
    normalized Gaussian kernel whose standard deviation is `sigma`.
    """
    sd = float(sigma)
    # make the kernel half-width equal to 4 standard deviations
    half = int(4.0 * sd + 0.5)
    weights = [0.0] * (2 * half + 1)
    weights[half] = 1.0
    total = 1.0
    variance = sd * sd
    # fill the symmetric Gaussian kernel and accumulate its sum
    for offset in range(1, half + 1):
        w = math.exp(- 0.5 * float(offset * offset) / variance)
        weights[half + offset] = w
        weights[half - offset] = w
        total += 2.0 * w
    # normalize, then square each weight
    for idx in range(2 * half + 1):
        weights[idx] /= total
        weights[idx] = weights[idx]**2
    return correlate1d(input, weights, axis, output, mode, cval, 0)
def square_gaussian_filter(input, sigma, output = None, mode = "reflect", cval = 0.0):
    """Multi-dimensional Squared Gaussian filter.
    The standard-deviations of the Gaussian filter are given for each
    axis as a sequence, or as a single number, in which case it is
    equal for all axes.
    Note: The multi-dimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.
    """
    # NOTE(review): relies on scipy's private _ni_support helpers; these are
    # not a public API and may move between scipy versions.
    input = np.asarray(input)
    output, return_value =_ni_support._get_output(output, input)
    sigmas =_ni_support._normalize_sequence(sigma, input.ndim)
    axes = range(input.ndim)
    # keep only the axes with a non-negligible sigma
    axes = [(axes[ii], sigmas[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        # filter each axis in turn, feeding the previous result forward
        for axis, sigma in axes:
            square_gaussian_filter1d(input, sigma, axis, output,
                              mode, cval)
            input = output
    else:
        output[...] = input[...]
    return return_value
class displacement_field(object):
    """
    Sampling of multiple vector-valued displacement fields on a 3D-lattice.
    Displacement fields are generated as linear combinations of fixed displacements.
    The coefficients are random Gaussian variables.
    """
    # NOTE(review): uses xrange and integer '/' division, so this class is
    # Python 2 only as written.
    def __init__(self, XYZ, sigma, n=1, mask=None, step=None):
        """
        Input :
        XYZ (3,p) array of voxel coordinates
        sigma <float> standard deviate of Gaussian filter kernel
        Each displacement block has length 4*sigma
        n <int> number of generated displacement fields.
        mask (q,) displacement blocks are limited to mask
        The constructor creates the following fields :
        self.block List of N block masks (voxel index vectors)
        self.weights List of N block weights (same shape as the masks)
        self.U (3,n,N) Displacement coefficients
        self.V (3,n,p) Displacements
        self.W (3,n,p) Discretize displacements
        self.I (n,p) Displaced voxels index
        (voxel k in the mask is displaced by field i to voxel self.I[i,k])
        """
        self.XYZ = XYZ
        self.sigma = sigma
        if np.isscalar(sigma):
            self.sigma = sigma * np.ones(3)
        self.n = n
        # lattice bounding box, padded by one voxel on each side
        self.XYZ_min = self.XYZ.min(axis=1).reshape(3, 1) - 1
        self.XYZ_max = self.XYZ.max(axis=1).reshape(3, 1) + 1
        p = XYZ.shape[1]
        if mask == None:
            self.mask = np.arange(p)
        else:
            self.mask = mask
        if step == None:
            self.step = int(round(2 * self.sigma.max()))
        else:
            self.step = step
        self.V = np.zeros((3, n, p), float)
        self.W = np.zeros((3, n, p), int)
        self.I = np.arange(p) * np.ones((n, p), int)
        # volume of voxel indices; -1 marks coordinates outside the lattice
        self.XYZ_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1
        self.XYZ_vol[XYZ[0], XYZ[1], XYZ[2]] = np.arange(p)
        self.init_displacement_blocks()
        self.compute_inner_blocks()
        self.U = np.zeros((3, n, len(self.block)), float)
    def init_displacement_blocks(self):
        """
        Called by class constructor.
        Builds self.block (voxel index vectors) and self.weights (Gaussian
        kernel values) by sliding a block window over the masked lattice.
        """
        XYZ = self.XYZ
        # displacement kernel
        sigma = self.sigma.max()
        #r = int(round(2 * sigma))
        d = int(round(6 * sigma))
        block_dim = (\
            self.XYZ.max(axis=1)+1 - \
            self.XYZ.min(axis=1)).clip(1,d)
        #kernel = np.zeros(d * np.ones(3), float)
        kernel = np.zeros(block_dim, float)
        # seed a small central cube, then smooth it into a Gaussian bump
        kernel[block_dim[0]/2-1:block_dim[0]/2+1,
               block_dim[1]/2-1:block_dim[1]/2+1,
               block_dim[2]/2-1:block_dim[2]/2+1] += 1
        kernel = gaussian_filter(kernel.squeeze(), sigma, mode='constant')
        kernel = kernel.reshape(block_dim)
        kernel /= kernel.max()
        # displacement 'blocks'
        self.block = []
        self.weights = []
        mask_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1
        mask_vol[list(XYZ[:, self.mask])] = self.mask
        Xm, Ym, Zm = XYZ.min(axis=1).astype(int)
        XM, YM, ZM = XYZ.max(axis=1).clip(1,np.inf).astype(int)
        for i in xrange(Xm, XM, self.step):
            for j in xrange(Ym, YM, self.step):
                for k in xrange(Zm, ZM, self.step):
                    block_vol = mask_vol[i:i + d, j:j + d, k:k + d]
                    XYZ_block = np.array( np.where( block_vol > -1 ) )
                    # keep only blocks fully covering the kernel support
                    if XYZ_block.size > 0 \
                    and (kernel[list(XYZ_block)] > 0.05).sum() == (kernel > 0.05).sum():
                        #print i,j,k
                        self.block.append(block_vol[XYZ_block[0], XYZ_block[1], XYZ_block[2]])
                        self.weights.append(kernel[XYZ_block[0], XYZ_block[1], XYZ_block[2]])
    def compute_inner_blocks(self):
        """
        Generate self.inner_blocks, index of blocks which are "far from" the borders of the lattice.
        """
        XYZ = self.XYZ
        sigma = self.sigma.max()
        mask_vol = np.zeros(XYZ.max(axis=1) + 1, int)
        mask_vol[XYZ[0], XYZ[1], XYZ[2]] += 1
        # erode the lattice mask so border voxels are excluded
        mask_vol = binary_erosion(mask_vol.squeeze(), iterations=int(round(sigma))).astype(int)
        mask_vol = mask_vol.reshape(XYZ.max(axis=1) + 1).astype(int)
        inner_mask = mask_vol[XYZ[0], XYZ[1], XYZ[2]]
        inner_blocks = []
        for i in xrange(len(self.block)):
            if inner_mask[self.block[i]].min() == 1:
                inner_blocks.append(i)
        self.inner_blocks = np.array(inner_blocks)
    def sample(self, i, b, proposal='prior', proposal_std=None, proposal_mean=None):
        """
        Generates U, V, L, W, I, where U, V, W, I are proposals for
        self.U[:,i,b], self.V[:,i,block], self.W[:,i,L], self.I[i,L] if block = self.block[b].
        W and I are given only in those voxels, indexed by L, where they differ from current values.
        Proposal is either 'prior', 'rand_walk' or 'fixed'
        """
        block = self.block[b]
        # Current values
        Uc = self.U[:, i, b]
        Vc = self.V[:, i, block]
        Wc = self.W[:, i, block]
        Ic = self.I[i, block]
        # Proposals
        # rejection-sample until the displaced voxels stay inside the lattice
        valid_proposal = False
        while not valid_proposal:
            if proposal == 'prior':
                U = np.random.randn(3) * proposal_std
            elif proposal == 'rand_walk':
                U = Uc + np.random.randn(3) * proposal_std
            else:
                U = proposal_mean + np.random.randn(3) * proposal_std
            V = Vc + (self.weights[b].reshape(1, -1) * (U - Uc).reshape(3,1))
            #print U
            W = np.round(V).astype(int)
            # L indexes the voxels whose discretized displacement changed
            L = np.where((W == Wc).prod(axis=0) == 0)[0]
            XYZ_W = np.clip(self.XYZ[:, block[L]] + W[:, L], self.XYZ_min, self.XYZ_max)
            I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]]
            #print (I == -1).sum()
            if len(L) == 0:
                valid_proposal = True
            elif min(I) > -1:
                valid_proposal = True
        return U, V, block[L], W[:, L], I
    def sample_all_blocks(self, proposal_std=None, proposal_mean=None):
        """
        Generates U, V, W, I, proposals for self.U[:, i], self.V[:, i], self.W[:, i], self.I[i].
        Proposal is either 'prior', 'rand_walk' or 'fixed'
        """
        B = len(self.block)
        p = self.XYZ.shape[1]
        V = np.zeros((3, p), float)
        I = -np.ones(p, int)
        # rejection-sample until every displaced voxel maps inside the lattice
        while min(I) == -1:
            U = np.random.randn(3, B) * proposal_std
            if proposal_mean != None:
                U += proposal_mean
            V *= 0
            for b in xrange(B):
                V[:, self.block[b]] += self.weights[b].reshape(1, -1) * U[:, b].reshape(3,1)
            W = np.round(V).astype(int)
            XYZ_W = np.clip(self.XYZ + W, self.XYZ_min, self.XYZ_max)
            I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]]
        return U, V, W, I
class gaussian_random_field(object):
    """Sample smooth displacement fields as Gaussian random fields on a
    3D lattice, normalized by the filter's squared-kernel response.
    """
    # NOTE(review): uses Python 2 semantics (no explicit xrange here, but
    # shares module-level conventions) and private filter helpers above.
    def __init__(self, XYZ, sigma, n=1):
        self.XYZ = XYZ
        self.sigma = sigma
        if np.isscalar(sigma):
            # keep sigma zero along degenerate (size-1) axes
            self.sigma = sigma * (XYZ.max(axis=1) > 1)
        self.n = n
        # volume of voxel indices; -1 marks coordinates outside the lattice
        self.XYZ_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1
        p = XYZ.shape[1]
        self.XYZ_vol[list(XYZ)] = np.arange(p)
        mask_vol = np.zeros(XYZ.max(axis=1) + 1, int)
        mask_vol[list(XYZ)] += 1
        # erode the mask so the field's support stays away from the borders
        mask_vol = binary_erosion(mask_vol.squeeze(), iterations=int(round(1.5*self.sigma.max())))
        mask_vol = mask_vol.reshape(XYZ.max(axis=1) + 1).astype(int)
        XYZ_mask = np.array(np.where(mask_vol > 0))
        self.mask = self.XYZ_vol[XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]]
        q = len(self.mask)
        dX, dY, dZ = XYZ.max(axis=1) + 1
        self.U_vol = np.zeros((3, dX, dY, dZ), float)
        self.U_vol[:, XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]] += 1
        # squared-kernel response of the mask, used to normalize field variance
        self.U_vol = square_gaussian_filter(self.U_vol, [0, self.sigma[0], self.sigma[1], self.sigma[2]], mode='constant')
        self.norm_coeff = 1 / np.sqrt(self.U_vol.max())
        self.U = np.zeros((3, n, q), float)
        self.V = np.zeros((3, n, p), float)
        self.W = np.zeros((3, n, p), int)
        self.I = np.arange(p).reshape(1, p) * np.ones((n, 1), int)
        self.XYZ_min = self.XYZ.min(axis=1).reshape(3, 1) - 1
        self.XYZ_max = self.XYZ.max(axis=1).reshape(3, 1) + 1
    def sample(self, i, std):
        """Draw one smooth displacement-field proposal for field `i`.

        Returns U (raw coefficients), V (smoothed displacements), L (voxels
        whose discretized displacement changed), W[:, L] and I (new voxel
        indices); rejection-samples until all displaced voxels stay inside
        the lattice.
        """
        mask = self.mask
        q = len(mask)
        XYZ = self.XYZ
        sigma = self.sigma
        Wc = self.W[:, i]
        valid = False
        if np.isscalar(std):
            std = std * np.ones((3,1))
        while not valid:
            U = np.random.randn(3, q) * std
            self.U_vol *= 0
            self.U_vol[:, XYZ[0, mask], XYZ[1, mask], XYZ[2, mask]] = U
            # smooth the white-noise coefficients into a correlated field
            self.U_vol = gaussian_filter(self.U_vol, [0, sigma[0], sigma[1], sigma[2]], mode='constant')
            V = self.U_vol[:, XYZ[0], XYZ[1], XYZ[2]] * self.norm_coeff
            W = np.round(V).astype(int)
            # L indexes voxels whose discretized displacement changed
            L = np.where((W == Wc).prod(axis=0) == 0)[0]
            XYZ_W = np.clip(XYZ[:, L] + W[:, L], self.XYZ_min, self.XYZ_max)
            I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]]
            if len(L) == 0:
                valid = True
            elif min(I) > -1:
                valid = True
        #self.U[:, i], self.V[:, i], self.W[:, i, L], self.I[i, L] = U, V, W[:, L], I
        return U, V, L, W[:, L], I
|
nipy/nipy-labs
|
nipy/labs/group/displacement_field.py
|
Python
|
bsd-3-clause
| 11,761
|
[
"Gaussian"
] |
cb9d68e1caf76efe1355585720d1c65d7e0882dd6ed984427bd6b41810dec12b
|
# A bunch of CONVERTERS for the Fit part of Alphabet.
import os
import math
from array import array
import optparse
import ROOT
from ROOT import *
import scipy
#### LINEAR ####
class LinearFit:
    """Linear fit wrapper around a ROOT TF1 ([0] + [1]*x) with error bands.

    The Converter/MakeConvFactor methods build +-1 sigma envelope functions
    and string expressions using the fit's parameter errors and covariance.
    """
    def __init__(self, init_var, range_min, range_max, name, Opt):
        # init_var: [intercept, slope] starting values for the fit
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        self.fit = TF1("LinearFit_"+self.name, "[0]+ [1]*x",self.rm,self.rp)
        self.fit.SetParameter(0, init_var[0])
        self.fit.SetParameter(1, init_var[1])
    def Converter(self, fitter):
        # Build the +-1 sigma envelopes from parameter errors ([2],[3]) and
        # the intercept/slope covariance term ([4]) via standard error
        # propagation for a linear function.
        self.ErrUp = TF1("LinearFitErrorUp"+self.name, "[0]+ [1]*x + sqrt((x*x*[3]*[3])+(x*2*[4])+([2]*[2]))",self.rm,self.rp)
        self.ErrUp.SetParameter(0, self.fit.GetParameter(0))
        self.ErrUp.SetParameter(1, self.fit.GetParameter(1))
        self.ErrUp.SetParameter(2, self.fit.GetParErrors()[0])
        self.ErrUp.SetParameter(3, self.fit.GetParErrors()[1])
        self.ErrUp.SetParameter(4, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrDn = TF1("LinearFitErrorDn"+self.name, "[0]+ [1]*x - sqrt((x*x*[3]*[3])+(x*2*[4])+([2]*[2]))",self.rm,self.rp)
        self.ErrDn.SetParameter(0, self.fit.GetParameter(0))
        self.ErrDn.SetParameter(1, self.fit.GetParameter(1))
        self.ErrDn.SetParameter(2, self.fit.GetParErrors()[0])
        self.ErrDn.SetParameter(3, self.fit.GetParErrors()[1])
        self.ErrDn.SetParameter(4, fitter.GetCovarianceMatrixElement(0,1))
    def MakeConvFactor(self, var, center):
        # Express the fit and its envelopes as cut-string formulas in terms
        # of (var - center), for use in TTree selections.
        X = var + "-" + str(center)
        self.ConvFact = "({0:2.9f} + (({2})*{1:2.9f}))".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),X)
        self.ConvFactUp = "({0:2.9f} + (({5})*{1:2.9f}) + (({5})*({5})*{3:2.9f}*{3:2.9f}+(({5})*2*{4:2.9f})+({2:2.9f}*{2:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),X)
        self.ConvFactDn = "({0:2.9f} + (({5})*{1:2.9f}) - (({5})*({5})*{3:2.9f}*{3:2.9f}+(({5})*2*{4:2.9f})+({2:2.9f}*{2:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),X)
#### QUADRATIC ####
class QuadraticFit:
    """Quadratic fit y = [0] + [1]*x + [2]*x^2 on [range_min, range_max].

    Converter() builds 1-sigma band TF1s from the parameter errors and the
    three covariance elements; MakeConvFactor() renders the same expressions
    as TTree::Draw-style strings in a user variable.
    """

    def __init__(self, init_var, range_min, range_max, name, Opt):
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        # Bug fix: include self.name in the registered ROOT name so multiple
        # instances do not overwrite each other in ROOT's global registry.
        self.fit = TF1("QuadraticFit_"+self.name, "[0]+ [1]*x + [2]*x*x", self.rm, self.rp)
        # Bug fix: the initial guesses previously all used init_var[0].
        self.fit.SetParameter(0, init_var[0])
        self.fit.SetParameter(1, init_var[1])
        self.fit.SetParameter(2, init_var[2])
        #self.fit.SetParLimits(2,0,20)

    def Converter(self, fitter):
        """Create self.ErrUp/self.ErrDn: central value +/- 1 sigma.

        [3]-[5] are parameter errors, [6]-[8] the (0,1), (0,2), (1,2)
        covariance elements from the fitter.
        """
        band = "sqrt(([3]*[3]) + (2*x*[6]) + (x*x*[4]*[4]) + (2*x*x*[7]) + (2*x*x*x*[8]) + (x*x*x*x*[5]*[5]))"
        self.ErrUp = TF1("QuadraticFitErrorUp"+self.name,
                         "[0]+ [1]*x + [2]*x*x + " + band, self.rm, self.rp)
        self.ErrDn = TF1("QuadraticFitErrorDn"+self.name,
                         "[0]+ [1]*x + [2]*x*x - " + band, self.rm, self.rp)
        values = [self.fit.GetParameter(i) for i in range(3)]
        values += [self.fit.GetParErrors()[i] for i in range(3)]
        values += [fitter.GetCovarianceMatrixElement(0, 1),
                   fitter.GetCovarianceMatrixElement(0, 2),
                   fitter.GetCovarianceMatrixElement(1, 2)]
        for ipar, v in enumerate(values):
            self.ErrUp.SetParameter(ipar, v)
            self.ErrDn.SetParameter(ipar, v)

    def MakeConvFactor(self, var, center):
        """Build string forms (central/up/down) evaluated in var-center."""
        X = var + "-" + str(center)
        self.ConvFact = "({0:2.9f} + (({3})*{1:2.9f}) + (({3})*({3})*{2:2.9f}))".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),X)
        self.ConvFactUp = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) + (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
        self.ConvFactDn = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) - (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
#### CUBIC ####
class CubicFit:
    """Cubic fit y = [0] + [1]*x + [2]*x^2 + [3]*x^3 on [range_min, range_max].

    Converter() builds 1-sigma band TF1s from the four parameter errors and
    the six covariance elements; MakeConvFactor() renders the same
    expressions as TTree::Draw-style strings in a user variable.
    """

    def __init__(self, init_var, range_min, range_max, name, Opt):
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        # Fixed the typo in the registered ROOT name ("CubeicFit").
        self.fit = TF1("CubicFit_"+self.name, "[0]+ [1]*x + [2]*x^2 + [3]*x^3", self.rm, self.rp)
        for ipar in range(4):
            self.fit.SetParameter(ipar, init_var[ipar])

    def Converter(self, fitter):
        """Create self.ErrUp/self.ErrDn: central value +/- 1 sigma.

        [4]-[7] are parameter errors; [8]-[13] the covariance elements
        (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) from the fitter.
        """
        #errTerm = "[4]^2 +((2*[8])*x) + (([5]^2+2*[9])*x^2) + ((2*[11])*x^3)"
        errTerm = "[4]^2+((2*[8])*x)+(([5]^2+2*[9])*x^2)+((2*[10]+2*[11])*x^3)+(([6]^2+2*[12])*x^4)+((2*[13])*x^5)+(([7]^2)*x^6)"
        self.ErrUp = TF1("CubicFitErrorUp"+self.name, "[0]+ [1]*x + [2]*x*x + [3]*x*x*x + sqrt("+errTerm+")", self.rm, self.rp)
        # Bug fix: ErrDn was registered under the same ROOT name as ErrUp
        # ("CubicFitErrorUp"), so ROOT silently replaced the first function.
        self.ErrDn = TF1("CubicFitErrorDn"+self.name, "[0]+ [1]*x + [2]*x*x + [3]*x*x*x - sqrt("+errTerm+")", self.rm, self.rp)
        values = [self.fit.GetParameter(i) for i in range(4)]
        values += [self.fit.GetParErrors()[i] for i in range(4)]
        values += [fitter.GetCovarianceMatrixElement(a, b)
                   for a, b in ((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3))]
        for band in (self.ErrUp, self.ErrDn):
            for ipar, v in enumerate(values):
                band.SetParameter(ipar, v)
            band.SetLineStyle(2)

    def MakeConvFactor(self, var, center):
        """Build string forms (central/up/down) evaluated in var-center.

        Bug fix: this method was copied from QuadraticFit — the central value
        dropped the cubic term and the band used the quadratic error
        propagation. It now mirrors errTerm from Converter().
        """
        X = "(" + var + "-" + str(center) + ")"
        p = ["{0:2.9f}".format(self.ErrUp.GetParameter(i)) for i in range(14)]
        central = "({0} + ({X}*{1}) + ({X}*{X}*{2}) + ({X}*{X}*{X}*{3}))".format(*p, X=X)
        # Variance string mirrors errTerm in Converter with x -> (var-center).
        variance = ("(({4}*{4}) + (2*{X}*{8}) + (({5}*{5}+2*{9})*{X}*{X})"
                    " + ((2*{10}+2*{11})*{X}*{X}*{X}) + (({6}*{6}+2*{12})*{X}*{X}*{X}*{X})"
                    " + ((2*{13})*{X}*{X}*{X}*{X}*{X}) + (({7}*{7})*{X}*{X}*{X}*{X}*{X}*{X}))").format(*p, X=X)
        self.ConvFact = central
        self.ConvFactUp = "({0} + {1}^0.5)".format(central, variance)
        self.ConvFactDn = "({0} - {1}^0.5)".format(central, variance)
#### LOGARITHMIC ####
#### EXPONENTIAL ####
#### GAUSSIAN ####
#CUSTOM =========--------------=============------------=============-------------===============
|
anovak10/plots
|
DDTmethod/Converters.py
|
Python
|
mit
| 9,313
|
[
"Gaussian"
] |
5b1e3072c241966c52f3e74beca93c5ab7be83318ef02bd288fe6700b73de835
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import strftime # use for clock simulation - shows time!
from time import sleep # use for delay in loops - wait for n sec.!
from threading import Thread # use to create a single threat for time
import sys # for unicode_to_kos0006u
import types # for unicode_to_kos0006u
import socket # check if tinker is available
from tinkerforge.ip_connection import IPConnection
from tinkerforge.brick_master import Master
from tinkerforge.bricklet_io16 import IO16
from tinkerforge.bricklet_rotary_poti import RotaryPoti
from tinkerforge.bricklet_lcd_20x4 import LCD20x4
from tinkerforge.bricklet_joystick import Joystick
from tinkerforge.bricklet_industrial_quad_relay import IndustrialQuadRelay
try:
from Board import Board as B
from Menu import Menu as M
except ImportError as err:
print err
def isOpen(ip, port):
    """Return True if a TCP connection to (ip, port) can be established.

    Used to probe whether a Tinkerforge brickd host is reachable before
    creating device objects against it.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    # Narrowed from a bare `except:`: a refused/unreachable connection or a
    # malformed port value simply means "not open"; anything else propagates.
    except (socket.error, ValueError):
        return False
    finally:
        # Bug fix: the socket was never closed before (file-descriptor leak).
        s.close()
class master():
    """Controller that owns the Tinkerforge connections for the relay 'Board'
    and the LCD/joystick 'Menu', and starts/stops both subsystems."""
    def __init__(self):
        #super(master, self).__init__()
        print 'init...'
        # Port the Tinkerforge brickd daemons listen on (both hosts).
        self.PORT = 4223
        self.MENU_running = False
        self.BOARD_running = False
        ### Connection for Menu
        self.MENU_HOST = "192.168.0.150" # Manually Set IP of Controller Board
        self.MENU_lcdUID = "gFt" # LCD Screen
        self.MENU_jskUID = "hAP" # Joystick
        ### END MENU CONNECTION
        ### Connection for Board
        self.BOARD_HOST = "192.168.0.111"
        self.BOARD_mstUID = "62eUEf" # master brick
        self.BOARD_io1UID = "ghh" # io16
        self.BOARD_lcdUID = "9ew" # lcd screen 20x4
        self.BOARD_iqrUID = "eRN" # industrial quad relay
        #### END BOARD CONNECTION
        return
    def start(self):
        # Start both subsystems; each guard prevents a double start.
        if self.BOARD_running: print 'Board already running!'
        else: self.startBoard(); print 'Board Started!'
        if self.MENU_running: print 'Menu already running!'
        else: self.startMenu(); print 'Menu Started!'
        return 'Started!'
    def status(self):
        # Human-readable running state of both subsystems.
        return 'Board: '+str(self.BOARD_running)+'\nMenu: '+str(self.MENU_running)
    def startBoard(self):
        # Connect to the Board host and create its device objects, then hand
        # them to a Board instance. Returns a status string either way.
        if self.BOARD_running: return 'Board already running!'
        if isOpen(self.BOARD_HOST, self.PORT):
            self.BOARD_running = True
            self.BOARD_ipcon = IPConnection() # Create IP connection
            self.mst = Master(self.BOARD_mstUID, self.BOARD_ipcon) # Master Brick
            self.io1 = IO16(self.BOARD_io1UID, self.BOARD_ipcon) # io16
            self.lcd1 = LCD20x4(self.BOARD_lcdUID, self.BOARD_ipcon) # lcd20x4
            self.iqr = IndustrialQuadRelay(self.BOARD_iqrUID, self.BOARD_ipcon) # Create device object
            self.BOARD_ipcon.connect(self.BOARD_HOST, self.PORT) # Connect to brickd
            # create Board instance
            self.BB = B(self.mst, self.io1, self.lcd1, self.iqr, self.BOARD_ipcon)
        else:
            return 'Board is offline'
        return "Hello, Board successfully started!"
    def startMenu(self):
        # Same pattern as startBoard, for the LCD/joystick menu controller.
        if self.MENU_running: return 'Menu already running!'
        if isOpen(self.MENU_HOST, self.PORT):
            self.MENU_running = True
            # Connect to WLAN Controller
            self.MENU_ipcon = IPConnection() # Create IP connection
            self.lcd = LCD20x4(self.MENU_lcdUID, self.MENU_ipcon) # Create device object LCD
            self.jsk = Joystick(self.MENU_jskUID, self.MENU_ipcon) # Create device object JOYSTICK
            # Don't use device before ipcon is connected
            self.MENU_ipcon.connect(self.MENU_HOST, self.PORT) # Connect to brickd
            # create Menu instance with the nessesary Hardware # IPCON to close Tinker Connection
            self.MM = M(self.jsk, self.lcd, self.MENU_ipcon)
        else:
            return 'Menu is offline'
        return "Hello, Menu successfully started!"
    def stop(self):
        # Stop whichever subsystems are running; flags are cleared first so a
        # re-entrant call cannot stop the same subsystem twice.
        print 'stopping devices...'
        if self.MENU_running:
            self.MENU_running = False
            self.MM.quit()
        if self.BOARD_running:
            self.BOARD_running = False
            self.BB.quit() # Stop Board
        #quit()
        return 'successfully stopped'
if __name__ == "__main__":
    # Simple single-client TCP command server: receives short text commands
    # and dispatches them to the corresponding master() methods.
    try:
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Bind the socket to the address given on the command line
        server_name = '127.0.0.1'#'192.168.0.111'
        server_address = (server_name, 10000)
        print >>sys.stderr, 'starting up on %s port %s' % server_address
        sock.bind(server_address)
        sock.listen(1)
        masterInstance = master()
        print masterInstance.status()
        # On Press close Application
        # Dispatch table: received command string -> bound master method.
        function_dict = {
            'start':masterInstance.start,
            'status':masterInstance.status,
            'startMenu':masterInstance.startMenu,
            'startBoard':masterInstance.startBoard,
            'stop':masterInstance.stop,
        }
        while True:
            print >>sys.stderr, 'waiting for a connection'
            connection, client_address = sock.accept()
            try:
                print >>sys.stderr, 'client connected:', client_address
                while True:
                    # NOTE(review): recv(16) returns at most 16 bytes; commands
                    # are assumed to arrive whole in one recv — confirm callers.
                    data = connection.recv(16)
                    print >>sys.stderr, 'received "%s"' % data
                    if data:
                        if data in function_dict:#== 'status':
                            function_dict[data]()
                            print "process will staart!"
                        #print data
                        #connection.sendall(data)
                    else:
                        break
            finally:
                connection.close()
        #raw_input('Press key to exit\n') # Use input() in Python 3
        masterInstance.stop()
    except Exception as errtxt:
        print errtxt
|
DeathPoison/roomControll
|
Socket/socketMaster.py
|
Python
|
mit
| 6,222
|
[
"TINKER"
] |
fe29700c03a4285d91d738d995c40a87f38b8b0f46dfc00e4a8349a454558ec6
|
"""A module containing convenient methods for general machine learning"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import zip
from builtins import int
from builtins import range
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
from builtins import object
__author__ = 'wittawat'
import autograd.numpy as np
import time
class ContextTimer(object):
    """Context manager that measures the wall-clock time of its body.

    Usage:

        with ContextTimer() as t:
            # do something
        time_spent = t.secs

    Attributes `start`, `end` (epoch seconds) and `secs` (elapsed) are set
    for callers. From https://www.huyng.com/posts/python-performance-analysis
    """

    def __init__(self, verbose=False):
        # When verbose, the elapsed time is printed on exit.
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        now = time.time()
        self.end = now
        self.secs = now - self.start
        if self.verbose:
            print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
class NumpySeedContext(object):
    """Context manager that seeds numpy's global RNG for the duration of the
    block and restores the previous RNG state on exit."""

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Remember the current global state so it can be restored later.
        self.cur_state = np.random.get_state()
        np.random.seed(self.seed)
        return self

    def __exit__(self, *args):
        np.random.set_state(self.cur_state)
# end NumpySeedContext
class ChunkIterable(object):
    """Iterable yielding (f, t) index pairs that tile [start, end).

    Consecutive pairs are chunk_size apart except the last, which always
    extends to `end` so the whole range is covered.
    """

    def __init__(self, start, end, chunk_size):
        self.start = start
        self.end = end
        self.chunk_size = chunk_size

    def __iter__(self):
        # Chunk boundaries, closed off with the exact end index.
        bounds = list(range(self.start, self.end, self.chunk_size))
        bounds.append(self.end)
        return zip(bounds, bounds[1:])
# end ChunkIterable
def constrain(val, min_val, max_val):
    """Clamp val into [min_val, max_val]."""
    lower_bounded = max(min_val, val)
    return min(max_val, lower_bounded)
def dist_matrix(X, Y):
    """
    Pairwise Euclidean distance matrix of shape X.shape[0] x Y.shape[0],
    computed via the expansion ||x-y||^2 = ||x||^2 - 2<x,y> + ||y||^2.
    """
    row_norms_x = np.sum(X**2, 1)
    row_norms_y = np.sum(Y**2, 1)
    sq = row_norms_x[:, np.newaxis] - 2.0*X.dot(Y.T) + row_norms_y[np.newaxis, :]
    # Clamp tiny negatives caused by floating-point cancellation before sqrt.
    sq[sq < 0] = 0
    return np.sqrt(sq)
def dist2_matrix(X, Y):
    """
    Pairwise Euclidean distance **squared** matrix of shape
    X.shape[0] x Y.shape[0] (no clamping or sqrt applied).
    """
    sum_sq_x = np.sum(X**2, 1)
    sum_sq_y = np.sum(Y**2, 1)
    return sum_sq_x[:, np.newaxis] - 2.0*np.dot(X, Y.T) + sum_sq_y[np.newaxis, :]
def meddistance(X, subsample=None, mean_on_fail=True):
    """
    Compute the median of pairwise distances (not distance squared) of points
    in the matrix. Useful as a heuristic for setting a Gaussian kernel width.

    Parameters
    ----------
    X : n x d numpy array
    subsample : if given (> 0), compute on at most this many rows chosen
        with a fixed internal seed, leaving the global RNG state untouched.
    mean_on_fail : if True, fall back to the mean pairwise distance when the
        median is 0 (can happen for discrete data, e.g. 0/1 valued).

    Return
    ------
    median (or fallback mean) pairwise distance
    """
    if subsample is None:
        D = dist_matrix(X, X)
        # Strictly-lower-triangular entries: each unordered pair once.
        Tri = D[np.tril_indices(D.shape[0], -1)]
        med = np.median(Tri)
        # Bug fix: mean_on_fail was previously ignored — the mean was
        # returned whenever med <= 0 regardless of the flag.
        if med <= 0 and mean_on_fail:
            return np.mean(Tri)
        return med
    else:
        assert subsample > 0
        # Seed deterministically but restore the global RNG state afterwards.
        rand_state = np.random.get_state()
        np.random.seed(9827)
        n = X.shape[0]
        ind = np.random.choice(n, min(subsample, n), replace=False)
        np.random.set_state(rand_state)
        # Recurse exactly once on the subsample.
        return meddistance(X[ind, :], None, mean_on_fail)
def is_real_num(X):
    """Return True if x is a finite real number (not NaN or +/-inf).

    Works elementwise on a numpy array as well, returning an array of the
    same dimension.
    """
    def each_elem_true(x):
        try:
            float(x)
            return not (np.isnan(x) or np.isinf(x))
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; any conversion/ufunc failure means "not real".
        except Exception:
            return False
    f = np.vectorize(each_elem_true)
    return f(X)
def tr_te_indices(n, tr_proportion, seed=9282):
    """Get two boolean index vectors for train/test splitting of n points.

    The split is deterministic for a given seed; the global RNG state is
    restored afterwards. Return (tr_ind, te_ind).
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    is_train = np.zeros(n, dtype=bool)
    chosen = np.random.choice(n, int(tr_proportion*n), replace=False)
    is_train[chosen] = True
    is_test = np.logical_not(is_train)
    np.random.set_state(saved_state)
    return (is_train, is_test)
def subsample_ind(n, k, seed=32):
    """
    Return indices choosing k out of n without replacement, deterministically
    for a given seed (global RNG state is left untouched).
    """
    with NumpySeedContext(seed=seed):
        chosen = np.random.choice(n, k, replace=False)
    return chosen
def subsample_rows(X, k, seed=29):
    """
    Subsample k rows from the matrix X without replacement (seeded).
    Raises ValueError when k exceeds the number of rows.
    """
    n_rows = X.shape[0]
    if k > n_rows:
        raise ValueError('k exceeds the number of rows.')
    return X[subsample_ind(n_rows, k, seed=seed), :]
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points from it.

    - reg: regularizer added to the diagonal of the covariance matrix
    - eig_pow: raise the covariance eigenvalues to this power before drawing,
      which is useful to shrink the spread of the variance.
    The draw is deterministic for a given seed (RNG state restored on exit).
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d == 1:
            # np.cov returns a scalar for 1-d data; promote to a 1x1 matrix.
            cov_x = np.array([[cov_x]])
        evals, evecs = np.linalg.eig(cov_x)
        # Keep only the real, nonnegative part of the spectrum.
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        draws = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return draws
def bound_by_data(Z, Data):
    """
    Project each column of Z into the [min, max] range of the corresponding
    column of Data.

    Z: m x d
    Data: n x d
    Return a projected Z of size m x d.
    """
    n_rows = Z.shape[0]
    lows = np.min(Data, 0)
    ups = np.max(Data, 0)
    low_mat = np.repeat(lows[np.newaxis, :], n_rows, axis=0)
    up_mat = np.repeat(ups[np.newaxis, :], n_rows, axis=0)
    return np.minimum(up_mat, np.maximum(low_mat, Z))
def one_of_K_code(arr):
    """
    One-of-K (one-hot) coding of a numpy array.
    For example, arr = ([0, 1, 0, 2]) returns
    [[1, 0, 0],
     [0, 1, 0],
     [1, 0, 0],
     [0, 0, 1]]
    """
    codes = np.unique(arr)
    out = np.zeros((len(arr), len(codes)))
    for col, code in enumerate(codes):
        # Tolerant float comparison against the unique value.
        rows = np.where(np.abs(arr - code) < 1e-8)
        out[rows[0], col] = 1
    return out
def fullprint(*args, **kwargs):
    """Pretty-print without numpy truncation ("..." elision).

    Temporarily raises numpy's print threshold, then restores the previous
    print options. https://gist.github.com/ZGainsforth/3a306084013633c52881
    """
    from pprint import pprint
    import numpy
    import sys
    opt = numpy.get_printoptions()
    # Bug fix: numpy >= 1.14 rejects threshold='nan'; a huge int achieves
    # the same "never summarize" effect portably.
    numpy.set_printoptions(threshold=sys.maxsize)
    try:
        pprint(*args, **kwargs)
    finally:
        # Restore the caller's print options even if pprint raises.
        numpy.set_printoptions(**opt)
def standardize(X):
    """Z-score each column of X: subtract the column mean and divide by the
    column standard deviation.

    Assumes no column has zero standard deviation (asserted via finiteness).
    """
    mx = np.mean(X, 0)
    stdx = np.std(X, axis=0)
    # X - mx is float (np.mean returns float), so true division matches what
    # the old_div compatibility shim computed; the shim is unnecessary.
    Zx = (X - mx) / stdx
    assert np.all(np.isfinite(Zx))
    return Zx
def outer_rows(X, Y):
    """
    Row-wise outer products: out[i] = outer(X[i], Y[i]).

    X: n x dx numpy array
    Y: n x dy numpy array
    Return an n x dx x dy numpy array.
    """
    # einsum keeps the whole computation in C; clearer and faster than the
    # tile/reshape construction this replaced.
    return np.einsum('ij,ik->ijk', X, Y)
def randn(m, n, seed=3):
    """Deterministic m x n standard-normal sample (RNG state restored)."""
    with NumpySeedContext(seed=seed):
        sample = np.random.randn(m, n)
    return sample
def matrix_inner_prod(A, B):
    """
    Frobenius (matrix) inner product <A, B> = trace(A^T * B), computed as a
    flattened dot product.
    """
    assert A.shape[0] == B.shape[0]
    assert A.shape[1] == B.shape[1]
    return A.ravel().dot(B.ravel())
def get_classpath(obj):
    """
    Return the full dotted module + class path of obj as a string, e.g.
    kgof.density.IsotropicNormal.
    """
    cls = obj.__class__
    return '{0}.{1}'.format(cls.__module__, cls.__name__)
def merge_dicts(*dict_args):
    """
    Shallow-copy and merge any number of dicts into a new dict; on key
    conflicts, later dicts win.
    http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
|
wittawatj/kernel-gof
|
kgof/util.py
|
Python
|
mit
| 9,350
|
[
"Gaussian"
] |
942f671017b6b810d5c209e098a3e8ac4ea3af898693ef00d13a187225393268
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of changing_pH_ferric_hydroxide.i
import os
import sys
import matplotlib.pyplot as plt
# Load the gold CSV output; the first two lines are headers, the remaining
# lines are numeric data rows. Bug fix: use a context manager so the file is
# closed even if parsing raises (previously open/close were manual).
with open("gold/changing_pH_ferric_hydroxide_out.csv", "r") as f:
    data = [list(map(float, line.strip().split(","))) for line in f.readlines()[2:]]
# Site concentrations: scaled x1000 to match the "mmolal" axis label below.
sFeO = [x[1] * 1000 for x in data]
sFeOH = [x[2] * 1000 for x in data]
sFeOH2 = [x[3] * 1000 for x in data]
wFeO = [x[4] * 1000 for x in data]
wFeOH = [x[5] * 1000 for x in data]
wFeOH2 = [x[6] * 1000 for x in data]
pH = [x[7] for x in data]
# Surface potential: scaled x1000 to match the "mV" axis label below.
pot = [x[8] * 1000 for x in data]
plt.figure(0)
plt.plot(pH, sFeO, 'k--', linewidth = 2.0, label = '>(s)FeO-')
plt.plot(pH, sFeOH, 'r-', linewidth = 2.0, label = '>(s)FeOH')
plt.plot(pH, sFeOH2, 'g-', linewidth = 2.0, label = '>(s)FeOH2+')
plt.plot(pH, wFeO, 'b-', linewidth = 2.0, label = '>(w)FeO-')
plt.plot(pH, wFeOH, 'y-', linewidth = 2.0, label = '>(w)FeOH')
plt.plot(pH, wFeOH2, 'k-', linewidth = 2.0, label = '>(w)FeOH2+')
plt.legend()
plt.xlabel("pH")
plt.ylabel("Species concentration (mmolal)")
plt.title("Concentrations of sites on Fe(OH)3(ppd)")
plt.savefig("../../../doc/content/media/geochemistry/changing_pH_ferric_hydroxide_fig1.png")
plt.figure(1)
plt.plot(pH, pot, 'k-', linewidth = 2.0, label = 'potential')
plt.xlabel("pH")
plt.ylabel("Surface potential (mV)")
plt.title("Surface potential of Fe(OH)3(ppd)")
plt.savefig("../../../doc/content/media/geochemistry/changing_pH_ferric_hydroxide_fig2.png")
sys.exit(0)
|
harterj/moose
|
modules/geochemistry/test/tests/time_dependent_reactions/changing_pH_ferric_hydroxide.py
|
Python
|
lgpl-2.1
| 1,773
|
[
"MOOSE"
] |
c48bed83c1af273cfd88810e49d51045073a83b8b537db464da415d0bb4df8f6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import json
import numpy as np
from pymatgen import Lattice, Structure, Specie, Element
from pymatgen.transformations.standard_transformations import \
OxidationStateDecorationTransformation, SubstitutionTransformation, \
OrderDisorderedStructureTransformation, AutoOxiStateDecorationTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation, EnumerateStructureTransformation, \
MultipleSubstitutionTransformation, ChargeBalanceTransformation, \
SubstitutionPredictorTransformation, MagOrderingTransformation, \
DopingTransformation, _find_codopant, SlabTransformation, \
MagOrderParameterConstraint
from monty.os.path import which
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.energy_models import IsingModel
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.surface import SlabGenerator
"""
Created on Jul 24, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 24, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
def get_table():
    """
    Load a lightweight lambda table for use in unit tests, to reduce
    initialization time and keep the tests insensitive to changes in the
    default lambda table.
    """
    json_file = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                             'test_files', 'struct_predictor',
                             'test_lambda.json')
    with open(json_file) as f:
        return json.load(f)
# Locate the external enumlib executables on PATH; tests that need them are
# skipped (via @unittest.skipIf) when either binary is missing.
enum_cmd = which('enum.x') or which('multienum.x')
makestr_cmd = which('makestr.x') or which('makeStr.x') or which('makeStr.py')
enumlib_present = enum_cmd and makestr_cmd
class SuperTransformationTest(unittest.TestCase):

    def test_apply_transformation(self):
        # A SuperTransformation's ranked output must match applying each
        # wrapped transformation to the structure individually.
        substitutions = [SubstitutionTransformation({"Li+": "Na+"}),
                         SubstitutionTransformation({"Li+": "K+"})]
        t = SuperTransformation(substitutions)
        coords = [[0, 0, 0],
                  [0.375, 0.375, 0.375],
                  [.5, .5, .5],
                  [0.875, 0.875, 0.875],
                  [0.125, 0.125, 0.125],
                  [0.25, 0.25, 0.25],
                  [0.625, 0.625, 0.625],
                  [0.75, 0.75, 0.75]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+"] * 6 + ["O2-"] * 2, coords)
        ranked = t.apply_transformation(struct, return_ranked_list=True)
        for entry in ranked:
            self.assertEqual(
                entry['transformation'].apply_transformation(struct),
                entry['structure'])

    @unittest.skipIf(not enumlib_present, "enum_lib not present.")
    def test_apply_transformation_mult(self):
        # Test returning multiple structures from each transformation.
        disord = Structure(np.eye(3) * 4.209, [{"Cs+": 0.5, "K+": 0.5}, "Cl-"],
                           [[0, 0, 0], [0.5, 0.5, 0.5]])
        disord.make_supercell([2, 2, 1])
        trans_list = [EnumerateStructureTransformation(),
                      OrderDisorderedStructureTransformation()]
        t = SuperTransformation(trans_list, nstructures_per_trans=10)
        self.assertEqual(
            len(t.apply_transformation(disord, return_ranked_list=20)), 8)
        t = SuperTransformation(trans_list)
        self.assertEqual(
            len(t.apply_transformation(disord, return_ranked_list=20)), 2)
class MultipleSubstitutionTransformationTest(unittest.TestCase):

    def test_apply_transformation(self):
        # Substituting half of the Li+ with either Na or K should yield one
        # ranked structure per substituent.
        sub_dict = {1: ["Na", "K"]}
        t = MultipleSubstitutionTransformation("Li+", 0.5, sub_dict, None)
        coords = [[0, 0, 0],
                  [0.75, 0.75, 0.75],
                  [0.5, 0.5, 0.5],
                  [0.25, 0.25, 0.25]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
        ranked = t.apply_transformation(struct, return_ranked_list=True)
        self.assertEqual(len(ranked), 2)
class ChargeBalanceTransformationTest(unittest.TestCase):

    def test_apply_transformation(self):
        # Removing Li+ must bring the (initially charged) cell back to
        # neutrality.
        t = ChargeBalanceTransformation('Li+')
        coords = [[0, 0, 0],
                  [0.375, 0.375, 0.375],
                  [.5, .5, .5],
                  [0.875, 0.875, 0.875],
                  [0.125, 0.125, 0.125],
                  [0.25, 0.25, 0.25],
                  [0.625, 0.625, 0.625],
                  [0.75, 0.75, 0.75]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+"] * 6 + ["O2-"] * 2, coords)
        balanced = t.apply_transformation(struct)
        self.assertAlmostEqual(balanced.charge, 0, 5)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumerateStructureTransformationTest(unittest.TestCase):

    def test_apply_transformation(self):
        enum_trans = EnumerateStructureTransformation(refine_structure=True)
        enum_trans2 = EnumerateStructureTransformation(refine_structure=True,
                                                       sort_criteria="nsites")
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
                             check_for_POTCAR=False)
        struct = p.structure
        expected_ans = [1, 3, 1]
        for i, frac in enumerate([0.25, 0.5, 0.75]):
            trans = SubstitutionTransformation({'Fe': {'Fe': frac}})
            partial = trans.apply_transformation(struct)
            oxitrans = OxidationStateDecorationTransformation(
                {'Li': 1, 'Fe': 2, 'P': 5, 'O': -2})
            decorated = oxitrans.apply_transformation(partial)
            # Energy-ranked first, then nsites-ranked; each entry must carry
            # the key its sort criterion produces.
            for transformer, marker in ((enum_trans, "energy"),
                                        (enum_trans2, "num_sites")):
                ranked = transformer.apply_transformation(decorated, 100)
                self.assertEqual(len(ranked), expected_ans[i])
                self.assertIsInstance(trans.apply_transformation(decorated),
                                      Structure)
                for entry in ranked:
                    self.assertIn(marker, entry)
        # make sure it works for a non-oxidation-state decorated structure
        trans = SubstitutionTransformation({'Fe': {'Fe': 0.5}})
        undecorated = trans.apply_transformation(struct)
        ranked = enum_trans.apply_transformation(undecorated, 100)
        self.assertEqual(len(ranked), 3)
        self.assertIsInstance(trans.apply_transformation(undecorated),
                              Structure)
        for entry in ranked:
            self.assertNotIn("energy", entry)

    def test_max_disordered_sites(self):
        s_orig = Structure(Lattice.cubic(4),
                           [{"Li": 0.2, "Na": 0.2, "K": 0.6}, {"O": 1}],
                           [[0, 0, 0], [0.5, 0.5, 0.5]])
        est = EnumerateStructureTransformation(max_cell_size=None,
                                               max_disordered_sites=5)
        self.assertEqual(len(est.apply_transformation(s_orig)), 8)

    def test_to_from_dict(self):
        trans = EnumerateStructureTransformation()
        round_tripped = EnumerateStructureTransformation.from_dict(
            trans.as_dict())
        self.assertEqual(round_tripped.symm_prec, 0.1)
class SubstitutionPredictorTransformationTest(unittest.TestCase):

    def test_apply_transformation(self):
        t = SubstitutionPredictorTransformation(threshold=1e-3, alpha=-5,
                                                lambda_table=get_table())
        coords = [[0, 0, 0],
                  [0.75, 0.75, 0.75],
                  [0.5, 0.5, 0.5]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ['O2-', 'Li1+', 'Li1+'], coords)
        outputs = t.apply_transformation(struct, return_ranked_list=True)
        self.assertEqual(len(outputs), 4, 'incorrect number of structures')

    def test_as_dict(self):
        # Round-trip through as_dict/from_dict must preserve the settings.
        t = SubstitutionPredictorTransformation(threshold=2, alpha=-2,
                                                lambda_table=get_table())
        restored = SubstitutionPredictorTransformation.from_dict(t.as_dict())
        self.assertEqual(restored.threshold, 2,
                         'incorrect threshold passed through dict')
        self.assertEqual(restored._substitutor.p.alpha, -2,
                         'incorrect alpha passed through dict')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class MagOrderingTransformationTest(PymatgenTest):
def setUp(self):
latt = Lattice.cubic(4.17)
species = ["Ni", "O"]
coords = [[0, 0, 0],
[0.5, 0.5, 0.5]]
self.NiO = Structure.from_spacegroup(225, latt, species, coords)
latt = Lattice([[2.085, 2.085, 0.0],
[0.0, -2.085, -2.085],
[-2.085, 2.085, -4.17]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0, 0.5],
[0, 0, 0],
[0.25, 0.5, 0.25],
[0.75, 0.5, 0.75]]
self.NiO_AFM_111 = Structure(latt, species, coords)
self.NiO_AFM_111.add_spin_by_site([-5, 5, 0, 0])
latt = Lattice([[2.085, 2.085, 0],
[0, 0, -4.17],
[-2.085, 2.085, 0]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0.5, 0.5],
[0, 0, 0],
[0, 0.5, 0],
[0.5, 0, 0.5]]
self.NiO_AFM_001 = Structure(latt, species, coords)
self.NiO_AFM_001.add_spin_by_site([-5, 5, 0, 0])
parser = CifParser(os.path.join(test_dir, 'Fe3O4.cif'))
self.Fe3O4 = parser.get_structures()[0]
trans = AutoOxiStateDecorationTransformation()
self.Fe3O4_oxi = trans.apply_transformation(self.Fe3O4)
parser = CifParser(os.path.join(test_dir, 'Li8Fe2NiCoO8.cif'))
self.Li8Fe2NiCoO8 = parser.get_structures()[0]
self.Li8Fe2NiCoO8.remove_oxidation_states()
def test_apply_transformation(self):
trans = MagOrderingTransformation({"Fe": 5})
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
check_for_POTCAR=False)
s = p.structure
alls = trans.apply_transformation(s, 10)
self.assertEqual(len(alls), 3)
f = SpacegroupAnalyzer(alls[0]["structure"], 0.1)
self.assertEqual(f.get_space_group_number(), 31)
model = IsingModel(5, 5)
trans = MagOrderingTransformation({"Fe": 5},
energy_model=model)
alls2 = trans.apply_transformation(s, 10)
# Ising model with +J penalizes similar neighbor magmom.
self.assertNotEqual(alls[0]["structure"], alls2[0]["structure"])
self.assertEqual(alls[0]["structure"], alls2[2]["structure"])
s = self.get_structure('Li2O')
# Li2O doesn't have magnetism of course, but this is to test the
# enumeration.
trans = MagOrderingTransformation({"Li+": 1}, max_cell_size=3)
alls = trans.apply_transformation(s, 100)
# TODO: check this is correct, unclear what len(alls) should be
self.assertEqual(len(alls), 12)
trans = MagOrderingTransformation({"Ni": 5})
alls = trans.apply_transformation(self.NiO.get_primitive_structure(),
return_ranked_list=10)
self.assertEqual(self.NiO_AFM_111.lattice, alls[0]["structure"].lattice)
self.assertEqual(self.NiO_AFM_001.lattice, alls[1]["structure"].lattice)
def test_ferrimagnetic(self):
    """An order parameter of 0.75 on LiFePO4 yields two ferrimagnetic orderings."""
    transformation = MagOrderingTransformation({"Fe": 5}, order_parameter=0.75,
                                               max_cell_size=1)
    poscar = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
                              check_for_POTCAR=False)
    orderings = transformation.apply_transformation(poscar.structure, 10)
    self.assertEqual(len(orderings), 2)
def test_as_from_dict(self):
    """Round-trip a MagOrderingTransformation through its dict representation."""
    original = MagOrderingTransformation({"Fe": 5}, order_parameter=0.75)
    serialized = original.as_dict()
    # The serialized form must be JSON-encodable.
    json.dumps(serialized)
    restored = MagOrderingTransformation.from_dict(serialized)
    self.assertEqual(restored.mag_species_spin, {"Fe": 5})
    from pymatgen.analysis.energy_models import SymmetryModel
    # The default energy model survives the round trip.
    self.assertIsInstance(restored.energy_model, SymmetryModel)
def test_zero_spin_case(self):
    """A zero-spin ordering keeps all sites and still tags them with spin=0."""
    structure = self.get_structure('Li2O')
    transformation = MagOrderingTransformation({"Li+": 0.0}, order_parameter=0.5)
    ordered = transformation.apply_transformation(structure)
    li_index = ordered.indices_from_symbol('Li')[0]
    # The input structure must stay untouched (no spin property added)...
    self.assertFalse('spin' in structure.sites[li_index].specie._properties)
    # ...while the transformed structure carries an explicit spin of zero.
    self.assertTrue('spin' in ordered.sites[li_index].specie._properties)
    self.assertEqual(ordered.sites[li_index].specie._properties['spin'], 0)
def test_advanced_usage(self):
    """Exercise MagOrderParameterConstraint-based ordering on Fe3O4:
    species-specific order parameters, sign conventions, site-property
    constraints, and multi-species constraints.
    """
    # test spin on just one oxidation state
    magtypes = {"Fe2+": 5}
    trans = MagOrderingTransformation(magtypes)
    alls = trans.apply_transformation(self.Fe3O4_oxi)
    self.assertIsInstance(alls, Structure)
    self.assertEqual(str(alls[0].specie), "Fe2+,spin=5")
    self.assertEqual(str(alls[2].specie), "Fe3+")
    # test multiple order parameters
    # this should only order on Fe3+ site, but assign spin to both
    magtypes = {"Fe2+": 5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(1, species_constraints="Fe2+"),
        MagOrderParameterConstraint(0.5, species_constraints="Fe3+")
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi)
    # using this 'sorted' syntax because exact order of sites in first
    # returned structure varies between machines: we just want to ensure
    # that the order parameter is accurate
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=5", "Fe2+,spin=5"]))
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=5",
                             "Fe3+,spin=-5", "Fe3+,spin=-5"]))
    self.assertEqual(str(alls[0].specie), "Fe2+,spin=5")
    # this should give same results as previously
    # but with opposite sign on Fe2+ site
    magtypes = {"Fe2+": -5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(1, species_constraints="Fe2+"),
        MagOrderParameterConstraint(0.5, species_constraints="Fe3+")
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi)
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=-5", "Fe2+,spin=-5"]))
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=5",
                             "Fe3+,spin=-5", "Fe3+,spin=-5"]))
    # while this should order on both sites
    magtypes = {"Fe2+": 5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(0.5, species_constraints="Fe2+"),
        MagOrderParameterConstraint(0.25, species_constraints="Fe3+")
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi)
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=5", "Fe2+,spin=-5"]))
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=-5",
                             "Fe3+,spin=-5", "Fe3+,spin=-5"]))
    # add coordination numbers to our test case
    # don't really care what these are for the test case
    cns = [6, 6, 6, 6, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0]
    self.Fe3O4.add_site_property('cn', cns)
    # this should give FM ordering on cn=4 sites, and AFM ordering on cn=6 sites
    magtypes = {"Fe": 5}
    order_parameters = [
        MagOrderParameterConstraint(0.5, species_constraints="Fe",
                                    site_constraint_name="cn", site_constraints=6),
        MagOrderParameterConstraint(1.0, species_constraints="Fe",
                                    site_constraint_name="cn", site_constraints=4)
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4)
    # sort so cn=6 sites come first, making the index-range checks stable
    alls.sort(key=lambda x: x.properties['cn'], reverse=True)
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(0, 4)]),
                     sorted(["Fe,spin=-5", "Fe,spin=-5",
                             "Fe,spin=5", "Fe,spin=5"]))
    self.assertEqual(sorted([str(alls[idx].specie) for idx in range(4, 6)]),
                     sorted(["Fe,spin=5", "Fe,spin=5"]))
    # now ordering on both sites, equivalent to order_parameter = 0.5
    magtypes = {"Fe2+": 5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(0.5, species_constraints="Fe2+"),
        MagOrderParameterConstraint(0.5, species_constraints="Fe3+")
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi, return_ranked_list=10)
    struct = alls[0]["structure"]
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=5", "Fe2+,spin=-5"]))
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=-5",
                             "Fe3+,spin=-5", "Fe3+,spin=5"]))
    self.assertEqual(len(alls), 4)
    # now mixed orderings where neither are equal or 1
    magtypes = {"Fe2+": 5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(0.5, species_constraints="Fe2+"),
        MagOrderParameterConstraint(0.25, species_constraints="Fe3+")
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi, return_ranked_list=100)
    struct = alls[0]["structure"]
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=5", "Fe2+,spin=-5"]))
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=-5",
                             "Fe3+,spin=-5", "Fe3+,spin=-5"]))
    self.assertEqual(len(alls), 2)
    # now order on multiple species
    magtypes = {"Fe2+": 5, "Fe3+": 5}
    order_parameters = [
        MagOrderParameterConstraint(0.5, species_constraints=["Fe2+", "Fe3+"]),
    ]
    trans = MagOrderingTransformation(magtypes, order_parameter=order_parameters)
    alls = trans.apply_transformation(self.Fe3O4_oxi, return_ranked_list=10)
    struct = alls[0]["structure"]
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(0, 2)]),
                     sorted(["Fe2+,spin=5", "Fe2+,spin=-5"]))
    self.assertEqual(sorted([str(struct[idx].specie) for idx in range(2, 6)]),
                     sorted(["Fe3+,spin=5", "Fe3+,spin=-5",
                             "Fe3+,spin=-5", "Fe3+,spin=5"]))
    self.assertEqual(len(alls), 6)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class DopingTransformationTest(PymatgenTest):
    """Tests for DopingTransformation (requires the external enumlib binaries)."""

    def test_apply_transformation(self):
        """Check structure counts and charge neutrality for isovalent and
        aliovalent doping, with vacancy and codopant compensation.
        """
        structure = PymatgenTest.get_structure("LiFePO4")
        # Isovalent doping (Ca2+ for Li+ sites) produces a single structure.
        t = DopingTransformation("Ca2+", min_length=10)
        ss = t.apply_transformation(structure, 100)
        self.assertEqual(len(ss), 1)
        # Too-tight ionic radius tolerance should produce no structures.
        t = DopingTransformation("Al3+", min_length=15, ionic_radius_tol=0.1)
        ss = t.apply_transformation(structure, 100)
        self.assertEqual(len(ss), 0)
        # Aliovalent doping with vacancies
        for dopant, nstructures in [("Al3+", 4), ("N3-", 420), ("Cl-", 16)]:
            t = DopingTransformation(dopant, min_length=4, alio_tol=1,
                                     max_structures_per_enum=1000)
            ss = t.apply_transformation(structure, 1000)
            self.assertEqual(len(ss), nstructures)
            for d in ss:
                # Compensation must leave the cell charge-neutral.
                self.assertEqual(d["structure"].charge, 0)
        # Aliovalent doping with codopant
        for dopant, nstructures in [("Al3+", 3), ("N3-", 60), ("Cl-", 60)]:
            t = DopingTransformation(dopant, min_length=4, alio_tol=1,
                                     codopant=True,
                                     max_structures_per_enum=1000)
            ss = t.apply_transformation(structure, 1000)
            self.assertEqual(len(ss), nstructures)
            for d in ss:
                self.assertEqual(d["structure"].charge, 0)
        # Make sure compensation is done with lowest oxi state
        structure = PymatgenTest.get_structure("SrTiO3")
        t = DopingTransformation("Nb5+", min_length=5, alio_tol=1,
                                 max_structures_per_enum=1000,
                                 allowed_doping_species=["Ti4+"])
        ss = t.apply_transformation(structure, 1000)
        self.assertEqual(len(ss), 3)
        for d in ss:
            self.assertEqual(d["structure"].formula, "Sr7 Ti6 Nb2 O24")

    def test_as_from_dict(self):
        """Round-trip a DopingTransformation through its dict representation."""
        trans = DopingTransformation("Al3+", min_length=5, alio_tol=1,
                                     codopant=False, max_structures_per_enum=1)
        d = trans.as_dict()
        # Check json encodability
        s = json.dumps(d)
        trans = DopingTransformation.from_dict(d)
        self.assertEqual(str(trans.dopant), "Al3+")
        self.assertEqual(trans.max_structures_per_enum, 1)

    def test_find_codopant(self):
        """_find_codopant should pick chemically similar species of the target charge."""
        self.assertEqual(_find_codopant(Specie("Fe", 2), 1), Specie("Cu", 1))
        self.assertEqual(_find_codopant(Specie("Fe", 2), 3), Specie("In", 3))
class SlabTransformationTest(PymatgenTest):
    """SlabTransformation should reproduce SlabGenerator output exactly."""

    def test_apply_transformation(self):
        # LiFePO4 (001) slab with an explicit shift.
        structure = self.get_structure("LiFePO4")
        transformation = SlabTransformation([0, 0, 1], 10, 10, shift=0.25)
        reference_slab = SlabGenerator(structure, [0, 0, 1], 10, 10).get_slab(0.25)
        transformed_slab = transformation.apply_transformation(structure)
        self.assertArrayAlmostEqual(reference_slab.lattice.matrix,
                                    transformed_slab.lattice.matrix)
        self.assertArrayAlmostEqual(reference_slab.cart_coords,
                                    transformed_slab.cart_coords)
        # FCC Fe (111) slab with the default shift.
        fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
                                        [[0, 0, 0]])
        transformation = SlabTransformation([1, 1, 1], 10, 10)
        transformed_slab = transformation.apply_transformation(fcc)
        reference_slab = SlabGenerator(fcc, [1, 1, 1], 10, 10).get_slab()
        self.assertArrayAlmostEqual(reference_slab.lattice.matrix,
                                    transformed_slab.lattice.matrix)
        self.assertArrayAlmostEqual(reference_slab.cart_coords,
                                    transformed_slab.cart_coords)
# Allow running this test module directly with verbose logging enabled.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    unittest.main()
|
setten/pymatgen
|
pymatgen/transformations/tests/test_advanced_transformations.py
|
Python
|
mit
| 25,380
|
[
"VASP",
"pymatgen"
] |
90cb3212736f381eb4f6e525902901c0f33a8eb7650bb5b3fae1599aed010119
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/server/server.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
from king_phisher.client import client_rpc
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.testing import KingPhisherServerTestCase
from king_phisher.utilities import random_string
class ServerTests(KingPhisherServerTestCase):
    """Basic HTTP-level tests for the King Phisher server: request methods,
    the require_id setting, and the built-in static resources.
    """
    def test_http_method_get(self):
        """GET on every file under the web root must return 200."""
        for phile in self.web_root_files(3):
            http_response = self.http_request(phile)
            self.assertHTTPStatus(http_response, 200)

    def test_http_method_head(self):
        """HEAD requests must be served like GET (status 200)."""
        for phile in self.web_root_files(3):
            http_response = self.http_request(phile, method='HEAD')
            self.assertHTTPStatus(http_response, 200)

    def test_non_existing_resources(self):
        """Random, non-existent resources must yield 404 consistently."""
        http_response = self.http_request(random_string(30) + '.html')
        self.assertHTTPStatus(http_response, 404)
        # Request a second random path to ensure the 404 is not a one-off.
        http_response = self.http_request(random_string(30) + '.html')
        self.assertHTTPStatus(http_response, 404)

    def test_secret_id(self):
        """With require_id enabled, requests without the id must 404; with it
        disabled, they succeed. The original setting is restored afterwards.
        """
        old_require_id = self.config.get('server.require_id')
        self.config.set('server.require_id', True)
        for phile in self.web_root_files(3):
            http_response = self.http_request(phile, include_id=True)
            self.assertHTTPStatus(http_response, 200)
            http_response = self.http_request(phile, include_id=False)
            self.assertHTTPStatus(http_response, 404)
        self.config.set('server.require_id', False)
        for phile in self.web_root_files(3):
            http_response = self.http_request(phile, include_id=False)
            self.assertHTTPStatus(http_response, 200)
        self.config.set('server.require_id', old_require_id)

    def test_static_resource_dead_drop(self):
        """The dead-drop endpoint must be reachable without an id."""
        http_response = self.http_request('kpdd', include_id=False)
        self.assertHTTPStatus(http_response, 200)

    def test_static_resource_javascript_hook(self):
        """kp.js must be served as text/javascript, define loadScript, and
        load the BeEF hook URL when one is configured.
        """
        http_response = self.http_request('kp.js')
        self.assertHTTPStatus(http_response, 200)
        content_type = http_response.getheader('Content-Type')
        error_message = "HTTP Response received Content-Type {0} when {1} was expected".format(content_type, 'text/javascript')
        self.assertEqual(content_type, 'text/javascript', msg=error_message)
        javascript = http_response.read()
        javascript = str(javascript.decode('utf-8'))
        load_script = 'function loadScript(url, callback) {'
        # Fixed grammar in the assertion message ("did not defined" -> "did not define").
        error_message = "Javascript did not define the loadScript function"
        self.assertTrue(load_script in javascript, msg=error_message)
        beef_hook_url = "http://{0}:3000/hook.js".format(random_string(30))
        self.config.set('beef.hook_url', beef_hook_url)
        http_response = self.http_request('kp.js')
        self.assertHTTPStatus(http_response, 200)
        javascript = http_response.read()
        javascript = str(javascript.decode('utf-8'))
        load_script = "loadScript('{0}');".format(beef_hook_url)
        error_message = "Javascript did not load the beef hook from the config"
        self.assertTrue(load_script in javascript, msg=error_message)

    def test_static_resource_tracking_image(self):
        """The tracking image must be served without an id and be a GIF."""
        http_response = self.http_request(self.config.get('server.tracking_image'), include_id=False)
        self.assertHTTPStatus(http_response, 200)
        image_data = http_response.read()
        self.assertTrue(image_data.startswith(b'GIF'))
class CampaignWorkflowTests(KingPhisherServerTestCase):
    """
    This is a monolithic test broken down into steps which represent the basic
    workflow of a normal campaign.
    """
    # NOTE: the step_* methods share state via instance attributes
    # (campaign_id, landing_page, message_id, visit_id) and must run in
    # numeric order; steps() enforces the ordering.

    def step_1_create_campaign(self):
        # Create a new campaign over RPC and remember its id for later steps.
        self.campaign_id = self.rpc('campaign/new', 'Unit Test Campaign')

    def step_2_send_messages(self):
        # Pick the first .html file in the web root as the landing page.
        self.landing_page = list(filter(lambda f: os.path.splitext(f)[1] == '.html', self.web_root_files()))[0]
        self.rpc('campaign/landing_page/new', self.campaign_id, 'localhost', self.landing_page)
        message_count = self.rpc('campaign/messages/count', self.campaign_id)
        self.assertEqual(message_count, 0)
        # Register a single outgoing message and verify the count increments.
        self.message_id = random_string(16)
        self.rpc('campaign/message/new', self.campaign_id, self.message_id, 'test@test.com', 'testers, inc.', 'test', 'test')
        message_count = self.rpc('campaign/messages/count', self.campaign_id)
        self.assertEqual(message_count, 1)

    def step_3_get_visits(self):
        visit_count = self.rpc('campaign/visits/count', self.campaign_id)
        self.assertEqual(visit_count, 0)
        # Visiting the landing page with the message id should record a visit.
        response = self.http_request('/' + self.landing_page, include_id=self.message_id)
        self.assertHTTPStatus(response, 200)
        visit_count = self.rpc('campaign/visits/count', self.campaign_id)
        self.assertEqual(visit_count, 1)
        # The server identifies the visit via a cookie; extract the visit id
        # from the "<cookie_name>=<visit_id>" pair for the later steps.
        cookie = response.getheader('Set-Cookie')
        self.assertIsNotNone(cookie)
        cookie = cookie.split(';')[0]
        cookie_name = self.config.get('server.cookie_name')
        self.assertEqual(cookie[:len(cookie_name) + 1], cookie_name + '=')
        self.visit_id = cookie[len(cookie_name) + 1:]

    def step_4_get_passwords(self):
        creds_count = self.rpc('campaign/credentials/count', self.campaign_id)
        self.assertEqual(creds_count, 0)
        # POST a random username/password with the visit cookie attached.
        username = random_string(8)
        password = random_string(10)
        body = {'username': username, 'password': password}
        headers = {'Cookie': "{0}={1}".format(self.config.get('server.cookie_name'), self.visit_id)}
        response = self.http_request('/' + self.landing_page, method='POST', include_id=False, body=body, headers=headers)
        self.assertHTTPStatus(response, 200)
        creds_count = self.rpc('campaign/credentials/count', self.campaign_id)
        self.assertEqual(creds_count, 1)
        # The stored credential must be linked to our message and visit.
        cred = next(self.rpc.remote_table('campaign/credentials', self.campaign_id))
        self.assertEqual(cred['username'], username)
        self.assertEqual(cred['password'], password)
        self.assertEqual(cred['message_id'], self.message_id)
        self.assertEqual(cred['visit_id'], self.visit_id)

    def step_5_get_repeat_visit(self):
        # A repeat request with the same cookie should bump visit_count by one.
        visit = self.rpc.remote_table_row('visits', self.visit_id)
        visit_count = visit['visit_count']
        headers = {'Cookie': "{0}={1}".format(self.config.get('server.cookie_name'), self.visit_id)}
        response = self.http_request('/' + self.landing_page, include_id=False, headers=headers)
        self.assertHTTPStatus(response, 200)
        visit = self.rpc.remote_table_row('visits', self.visit_id)
        self.assertEqual(visit['visit_count'], visit_count + 1)

    def steps(self):
        # Yield (name, bound method) for every step_* method, ordered by the
        # numeric component of the name.
        steps = filter(lambda f: f.startswith('step_'), dir(self))
        steps = sorted(steps, key=lambda x: int(x.split('_')[1]))
        for name in steps:
            yield name, getattr(self, name)

    def test_campaign_workflow(self):
        # Run the full workflow with id checking enabled.
        self.config.set('server.require_id', True)
        for name, step in self.steps():
            step()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
0x0mar/king-phisher
|
tests/server/server.py
|
Python
|
bsd-3-clause
| 7,990
|
[
"VisIt"
] |
a2fa3a073165e0361d995638f426cf1feefea4b8a7766e09ca10d76f5ed31154
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2006 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: sampleposteddata.py
This page lists the data posted by a form.
File Authors:
Andrew Liu (andrew@liuholdings.com)
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table width="100%" border="1" cellspacing="0" bordercolor="#999999">
<tr style="FONT-WEIGHT: bold; COLOR: #dddddd; BACKGROUND-COLOR: #999999">
<td nowrap>Field Name </td>
<td>Value</td>
</tr>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<td valign="top" nowrap><b>%s</b></td>
<td width="100%%">%s</td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
|
viollarr/henriquecursos
|
site/FCKeditor/_samples/py/sampleposteddata.py
|
Python
|
apache-2.0
| 1,828
|
[
"VisIt"
] |
3b52b04cac651de15df37c71b81d44e31fb91cd46216e8740d18341b45e59265
|
# PDielec driver script: configures the notebook tabs for an MgO single-crystal
# reflectance calculation. NOTE(review): `self` is supplied by the PDielec
# application that executes this script — it is not defined here.
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'experiment'
tab.settings['Output file name'] = 'mgo.exp'
#
# Change the first scenario tab to single crystal
#
tab = self.notebook
tab.switchScenario(0, scenarioType='Single crystal')
#
# Configure the first scenario: (001) surface, 10 degree incidence,
# vacuum superstrate/substrate, thick-slab mode.
tab = self.notebook.scenarios[0]
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Angle of incidence'] = 10.0
tab.settings['Superstrate dielectric'] = 1.0
tab.settings['Substrate dielectric'] = 1.0
tab.settings['Superstrate depth'] = 999.0
tab.settings['Substrate depth'] = 999.0
tab.settings['Film thickness'] = 10000.0
tab.settings['Mode'] = 'Thick slab'
tab.settings['Legend'] = 'Angle 10'
#
# Add one additional scenario per incidence angle from 20 to 80 degrees;
# each new scenario inherits the settings configured above.
for degs in [20, 30, 40, 50, 60, 70, 80]:
    self.notebook.addScenario(scenarioType='Single crystal')
    tab = self.notebook.scenarios[-1]
    tab.settings['Angle of incidence'] = degs
    tab.settings['Legend'] = 'Angle {}'.format(degs)
#
# Plotting: P-polarised crystal reflectance from 100 to 800 cm-1.
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 100
tab.settings['Maximum frequency'] = 800
tab.settings['Frequency increment'] = 0.2
tab.settings['Plot title'] = 'Test of Drude-Lorentz Dielectric for MgO'
tab.settings['Plot type'] = 'Crystal Reflectance (P polarisation)'
#
#
|
JohnKendrick/PDielec
|
Examples/Experiment/drude-lorentz/script.py
|
Python
|
mit
| 1,292
|
[
"CRYSTAL"
] |
73750ca77b5e0e1eaa0ba8b80cfdf6c98440b9d24e508b3f7a49ab77cc568ad4
|
# Compatibility shim: the built-in any() was added in Python 2.5; provide a
# pure-Python fallback so this module still works on older interpreters.
try:
    any = any
except NameError:
    def any(iterable):
        # Return True as soon as any element is truthy, else False.
        for element in iterable:
            if element:
                return True
        return False
import sys
from typehandlers.codesink import CodeSink
from typehandlers.base import TypeLookupError, TypeConfigurationError, CodeGenerationError, NotSupportedError, \
Parameter, ReturnValue
try:
from version import __version__
except ImportError:
__version__ = [0, 0, 0, 0]
import settings
import warnings
def write_preamble(code_sink, min_python_version=None):
    """
    Write a preamble, containing includes, #define's and typedef's
    necessary to correctly compile the code with the given minimum python
    version.

    :param code_sink: a CodeSink the preamble is written to
    :param min_python_version: (major, minor) tuple; defaults to
        settings.min_python_version
    """
    if min_python_version is None:
        min_python_version = settings.min_python_version

    assert isinstance(code_sink, CodeSink)
    assert isinstance(min_python_version, tuple)

    if __debug__:
        ## Gracefully allow code migration
        # write_preamble used to be part of the public API; detect a second
        # call on the same sink and warn instead of emitting the preamble twice.
        if hasattr(code_sink, "have_written_preamble"):
            warnings.warn("Duplicate call to write_preamble detected. "
                          "Note that there has been an API change in PyBindGen "
                          "and directly calling write_preamble should no longer be done "
                          "as it is done by PyBindGen itself.",
                          DeprecationWarning, stacklevel=2)
            return
        else:
            setattr(code_sink, "have_written_preamble", None)

    # Common header: Python.h must come first, per the CPython docs.
    code_sink.writeln('''/* This file was generated by PyBindGen %s */
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stddef.h>
''' % '.'.join([str(x) for x in __version__]))

    # Backfill Py_CLEAR/Py_VISIT and PyEval_ThreadsInitialized for < 2.4.
    if min_python_version < (2, 4):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x020400F0

#define PyEval_ThreadsInitialized() 1

#define Py_CLEAR(op)				\
        do {                            	\
                if (op) {			\
                        PyObject *tmp = (PyObject *)(op);	\
                        (op) = NULL;		\
                        Py_DECREF(tmp);		\
                }				\
        } while (0)


#define Py_VISIT(op)							\
        do { 								\
                if (op) {						\
                        int vret = visit((PyObject *)(op), arg);	\
                        if (vret)					\
                                return vret;				\
                }							\
        } while (0)

#endif

''')
    # Backfill Py_ssize_t and the ssize-based slot typedefs for < 2.5.
    if min_python_version < (2, 5):
        code_sink.writeln(r'''
#if PY_VERSION_HEX < 0x020500F0

typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
typedef inquiry lenfunc;
typedef intargfunc ssizeargfunc;
typedef intobjargproc ssizeobjargproc;

#endif
''')
    # Unconditional helpers: unused-parameter macro and wrapper-flags enum.
    code_sink.writeln(r'''
#if     __GNUC__ > 2
# define PYBINDGEN_UNUSED(param) param __attribute__((__unused__))
#elif     __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
# define PYBINDGEN_UNUSED(param) __attribute__((__unused__)) param
#else
# define PYBINDGEN_UNUSED(param) param
#endif  /* !__GNUC__ */

#ifndef _PyBindGenWrapperFlags_defined_
#define _PyBindGenWrapperFlags_defined_
typedef enum _PyBindGenWrapperFlags {
   PYBINDGEN_WRAPPER_FLAG_NONE = 0,
   PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED = (1<<0),
} PyBindGenWrapperFlags;
#endif
''')
def mangle_name(name):
    """make a name Like<This,and,That> look Like__lt__This_and_That__gt__"""
    # Each C++ punctuation character maps to an identifier-safe token; the
    # replacement outputs contain none of the searched characters, so the
    # substitution order does not matter.
    replacements = (
        ('<', '__lt__'), ('>', '__gt__'), (',', '_'),
        (' ', '_'), ('&', '__amp__'), ('*', '__star__'),
        (':', '_'), ('(', '_lp_'), (')', '_rp_'),
    )
    mangled = name
    for old, new in replacements:
        mangled = mangled.replace(old, new)
    return mangled
def get_mangled_name(base_name, template_args):
    """for internal pybindgen use

    Mangle base_name and, if template_args is non-empty, append the mangled
    arguments wrapped in __lt__/__gt__ markers.
    """
    # NOTE: basestring makes this Python-2 only.
    assert isinstance(base_name, basestring)
    assert isinstance(template_args, (tuple, list))

    if template_args:
        return '%s__lt__%s__gt__' % (mangle_name(base_name), '_'.join(
            [mangle_name(arg) for arg in template_args]))
    else:
        return mangle_name(base_name)
class SkipWrapper(Exception):
    """Raised to signal that a wrapper failed to generate and must simply
    be skipped.

    For internal pybindgen use.
    """
def call_with_error_handling(callback, args, kwargs, wrapper,
                             exceptions_to_handle=(TypeConfigurationError,
                                                   CodeGenerationError,
                                                   NotSupportedError)):
    """for internal pybindgen use

    Invoke callback(*args, **kwargs); if a configured error handler exists
    and the exception is one of exceptions_to_handle, delegate to the
    handler: when it reports the error as handled, raise SkipWrapper so the
    wrapper is skipped, otherwise re-raise the original exception.
    """
    if settings.error_handler is None:
        # No handler configured: let any exception propagate unchanged.
        return callback(*args, **kwargs)
    else:
        try:
            return callback(*args, **kwargs)
        except Exception, ex:
            if isinstance(ex, exceptions_to_handle):
                dummy1, dummy2, traceback = sys.exc_info()
                if settings.error_handler.handle_error(wrapper, ex, traceback):
                    raise SkipWrapper
                else:
                    raise
            else:
                # Unexpected exception type: never swallow it.
                raise
def ascii(value):
    """
    ascii(str_or_unicode_or_None) -> str_or_None

    Make sure the value is either str or unicode object, and if it is
    unicode convert it to ascii.  Also, None is an accepted value, and
    returns itself.

    Raises TypeError for any other type, or UnicodeEncodeError if a
    unicode value contains non-ASCII characters.
    """
    # NOTE: the unicode branch makes this Python-2 only (shadows the
    # Python 3 builtin ascii() as well).
    if value is None:
        return value
    elif isinstance(value, str):
        return value
    elif isinstance(value, unicode):
        return value.encode('ascii')
    else:
        raise TypeError("value must be str or ascii string contained in a unicode object")
def param(*args, **kwargs):
    """
    Simplified syntax for representing a parameter with delayed lookup.

    Parameters are the same as L{Parameter.new}.  Returns the positional
    arguments as a tuple with the keyword arguments appended as a final
    dict element.
    """
    spec = list(args)
    spec.append(kwargs)
    return tuple(spec)
def retval(*args, **kwargs):
    """
    Simplified syntax for representing a return value with delayed lookup.

    Parameters are the same as L{ReturnValue.new}.  Returns the positional
    arguments as a tuple with the keyword arguments appended as a final
    dict element.
    """
    spec = list(args)
    spec.append(kwargs)
    return tuple(spec)
def parse_param_spec(param_spec):
    """Split a param() spec tuple into (positional_args, keyword_dict).

    The spec must be a tuple of at least two elements; a trailing dict, if
    present, supplies the keyword arguments.
    """
    if not isinstance(param_spec, tuple):
        raise TypeError("Could not parse `%r' as a Parameter" % param_spec)
    assert len(param_spec) >= 2
    if isinstance(param_spec[-1], dict):
        return param_spec[:-1], param_spec[-1]
    return param_spec, dict()
def parse_retval_spec(retval_spec):
    """Split a retval() spec into (positional_args, keyword_dict).

    Accepts either a tuple (with an optional trailing keyword dict) or a
    bare string naming the return type.
    """
    if isinstance(retval_spec, str):
        return (retval_spec,), dict()
    if not isinstance(retval_spec, tuple):
        raise TypeError("Could not parse `%r' as a ReturnValue" % retval_spec)
    assert len(retval_spec) >= 1
    if isinstance(retval_spec[-1], dict):
        return retval_spec[:-1], retval_spec[-1]
    return retval_spec, dict()
def eval_param(param_value, wrapper=None):
    """Resolve a delayed param() spec into a Parameter instance.

    Already-constructed Parameter objects pass through unchanged; anything
    else is parsed and handed to Parameter.new via the configured error
    handler.
    """
    if isinstance(param_value, Parameter):
        return param_value
    else:
        args, kwargs = parse_param_spec(param_value)
        return call_with_error_handling(Parameter.new, args, kwargs, wrapper,
                                        exceptions_to_handle=(TypeConfigurationError,
                                                              NotSupportedError,
                                                              TypeLookupError))
def eval_retval(retval_value, wrapper=None):
    """Resolve a delayed retval() spec into a ReturnValue instance.

    Already-constructed ReturnValue objects pass through unchanged;
    anything else is parsed and handed to ReturnValue.new via the
    configured error handler.
    """
    if isinstance(retval_value, ReturnValue):
        return retval_value
    else:
        args, kwargs = parse_retval_spec(retval_value)
        return call_with_error_handling(ReturnValue.new, args, kwargs, wrapper,
                                        exceptions_to_handle=(TypeConfigurationError,
                                                              NotSupportedError,
                                                              TypeLookupError))
|
cawka/pybindgen-old
|
pybindgen/utils.py
|
Python
|
lgpl-2.1
| 7,846
|
[
"VisIt"
] |
edc29cd43795c08165f619d4dd903afd93f31386b48b7ab40e722f620333f110
|
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup
from setuptools import distutils
import os
import sys
def get_version_from_pkg_info():
    """Read the package version from a generated PKG-INFO metadata file
    (present when building from an sdist)."""
    metadata = distutils.dist.DistributionMetadata("PKG-INFO")
    return metadata.version
def get_version_from_pyver():
    """Derive the version from git via the pyver package.

    pyver is only mandatory when building a distributable package
    (sdist/bdist_wheel); otherwise a placeholder version is returned.
    """
    try:
        import pyver
    except ImportError:
        if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
            raise ImportError('You must install pyver to create a package')
        else:
            return 'noversion'
    version, version_info = pyver.get_version(pkg="datalake_api",
                                              public=True)
    return version
def get_version():
    """Return the package version: from PKG-INFO when building from an
    sdist, otherwise derived via pyver."""
    building_from_sdist = os.path.exists("PKG-INFO")
    if building_from_sdist:
        return get_version_from_pkg_info()
    return get_version_from_pyver()
# Package definition for the datalake_api service.
setup(name='datalake_api',
      url='https://github.com/planetlabs/datalake-api',
      version=get_version(),
      description='datalake_api ingests datalake metadata records',
      author='Brian Cavagnolo',
      author_email='brian@planet.com',
      packages=['datalake_api'],
      install_requires=[
          'pyver>=1.0.18',
          'memoized_property>=1.0.2',
          'simplejson>=3.3.1',
          'datalake-common>=0.25',
          'Flask>=0.10.1',
          'flask-swagger==0.2.8',
          'boto3==1.1.3',
          'raven[flask]>=5.6.0',
          'blinker>=1.4',
      ],
      # Test-only dependencies, installed via the [test] extra.
      extras_require={
          'test': [
              'pytest==2.7.2',
              'flake8==2.5.0',
              'moto==0.4.23',
          ],
      },
      include_package_data=True)
|
planetlabs/datalake-api
|
setup.py
|
Python
|
apache-2.0
| 2,140
|
[
"Brian"
] |
843be31ceb1a51dbdbf96688f4c24fc89dca067eca228ed08a9fa1892218b54f
|
# -*- coding: iso-8859-15 -*-
"""
routes
======
This module establishes and defines the Web Handlers and Websockets
that are associated with a specific URL routing name. New routing
associations must be defined here.
Notes
-----
For more information regarding routing URL and valid regular expressions
visit: http://www.tornadoweb.org/en/stable/guide/structure.html
"""
import os
import sys
import web
import rest
#Define new rest associations
REST = [
(r"/api/countries(/?([A-Z]{2})?)", rest.countries_rest.MainHandler),
(r'/api/flights', rest.flights_rest.MainHandler),
(r'/api/users', rest.users_rest.MainHandler),
(r'/api/videos', rest.videos_rest.MainHandler)
]
# Define new web rendering route associations
WEB = [
(r'/flights', web.flights_handler.MainHandler)
]
ROUTES = REST + WEB
|
isis2304/python-server
|
routes.py
|
Python
|
mit
| 796
|
[
"VisIt"
] |
5de6f06578105c277cfb7ba92f490b64085314288664522973c1b3dffce9ed72
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
# Root directory of the VTK test data tree, resolved by the VTK test utility.
VTK_DATA_ROOT = vtkGetDataRoot()
class financialField3(vtk.test.Testing.vtkTest):

    def testFinancialField(self):
        """
        Demonstrate the use and manipulation of fields and use of
        vtkProgrammableDataObjectSource. This creates fields the hard way
        (as compared to reading a vtk field file), but shows you how to
        interface to your own raw data.

        The image should be the same as financialField.tcl
        """

        xAxis = "INTEREST_RATE"
        yAxis = "MONTHLY_PAYMENT"
        zAxis = "MONTHLY_INCOME"
        scalar = "TIME_LATE"

        # Parse an ascii file and manually create a field. Then construct a
        # dataset from the field.
        dos = vtk.vtkProgrammableDataObjectSource()

        def parseFile():
            f = open(VTK_DATA_ROOT + "/Data/financial.txt", "r")

            line = f.readline().split()
            # From the size calculate the number of lines.
            numPts = int(line[1])
            # Use floor division so numLines stays an int: under Python 3 the
            # original true division produced a float and broke range().
            numLines = (numPts - 1) // 8 + 1

            # create the data object
            field = vtk.vtkFieldData()
            field.AllocateArrays(4)

            # read TIME_LATE - dependent variable
            # (skip blank lines until the array-name header is reached)
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            timeLate = vtk.vtkFloatArray()
            timeLate.SetName(line[0])
            for i in range(0, numLines):
                line = f.readline().split()
                for j in line:
                    timeLate.InsertNextValue(float(j))
            field.AddArray(timeLate)

            # MONTHLY_PAYMENT - independent variable
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            monthlyPayment = vtk.vtkFloatArray()
            monthlyPayment.SetName(line[0])
            for i in range(0, numLines):
                line = f.readline().split()
                for j in line:
                    monthlyPayment.InsertNextValue(float(j))
            field.AddArray(monthlyPayment)

            # UNPAID_PRINCIPLE - skip
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            for i in range(0, numLines):
                line = f.readline()

            # LOAN_AMOUNT - skip
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            for i in range(0, numLines):
                line = f.readline()

            # INTEREST_RATE - independent variable
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            interestRate = vtk.vtkFloatArray()
            interestRate.SetName(line[0])
            for i in range(0, numLines):
                line = f.readline().split()
                for j in line:
                    interestRate.InsertNextValue(float(j))
            field.AddArray(interestRate)

            # MONTHLY_INCOME - independent variable
            while True:
                line = f.readline().split()
                if len(line) > 0:
                    break
            monthlyIncome = vtk.vtkFloatArray()
            monthlyIncome.SetName(line[0])
            for i in range(0, numLines):
                line = f.readline().split()
                for j in line:
                    monthlyIncome.InsertNextValue(float(j))
            field.AddArray(monthlyIncome)

            dos.GetOutput().SetFieldData(field)

        dos.SetExecuteMethod(parseFile)

        # Create the dataset: map three field arrays onto x/y/z point
        # coordinates, normalizing each to [0, 1].
        do2ds = vtk.vtkDataObjectToDataSetFilter()
        do2ds.SetInputConnection(dos.GetOutputPort())
        do2ds.SetDataSetTypeToPolyData()
        # format: component#, arrayname, arraycomp, minArrayId, maxArrayId, normalize
        do2ds.DefaultNormalizeOn()
        do2ds.SetPointComponent(0, xAxis, 0)
        do2ds.SetPointComponent(1, yAxis, 0)
        do2ds.SetPointComponent(2, zAxis, 0)
        do2ds.Update()

        # Exercise add/remove of operations; only the final MOVE remains.
        rf = vtk.vtkRearrangeFields()
        rf.SetInputConnection(do2ds.GetOutputPort())
        rf.AddOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
        rf.RemoveOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
        rf.AddOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
        rf.RemoveAllOperations()
        rf.AddOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
        rf.Update()

        # Normalize the scalar by its maximum (renamed from 'max' to avoid
        # shadowing the builtin).
        maxTimeLate = rf.GetOutput().GetPointData().GetArray(scalar).GetRange(0)[1]
        calc = vtk.vtkArrayCalculator()
        calc.SetInputConnection(rf.GetOutputPort())
        calc.SetAttributeModeToUsePointData()
        calc.SetFunction("s / %f" % maxTimeLate)
        calc.AddScalarVariable("s", scalar, 0)
        calc.SetResultArrayName("resArray")

        aa = vtk.vtkAssignAttribute()
        aa.SetInputConnection(calc.GetOutputPort())
        aa.Assign("resArray", "SCALARS", "POINT_DATA")
        aa.Update()

        rf2 = vtk.vtkRearrangeFields()
        rf2.SetInputConnection(aa.GetOutputPort())
        rf2.AddOperation("COPY", "SCALARS", "POINT_DATA", "DATA_OBJECT")

        # construct pipeline for original population
        popSplatter = vtk.vtkGaussianSplatter()
        popSplatter.SetInputConnection(rf2.GetOutputPort())
        popSplatter.SetSampleDimensions(50, 50, 50)
        popSplatter.SetRadius(0.05)
        popSplatter.ScalarWarpingOff()
        popSurface = vtk.vtkContourFilter()
        popSurface.SetInputConnection(popSplatter.GetOutputPort())
        popSurface.SetValue(0, 0.01)
        popMapper = vtk.vtkPolyDataMapper()
        popMapper.SetInputConnection(popSurface.GetOutputPort())
        popMapper.ScalarVisibilityOff()
        popMapper.ImmediateModeRenderingOn()
        popActor = vtk.vtkActor()
        popActor.SetMapper(popMapper)
        popActor.GetProperty().SetOpacity(0.3)
        popActor.GetProperty().SetColor(.9, .9, .9)

        # construct pipeline for delinquent population
        lateSplatter = vtk.vtkGaussianSplatter()
        lateSplatter.SetInputConnection(aa.GetOutputPort())
        lateSplatter.SetSampleDimensions(50, 50, 50)
        lateSplatter.SetRadius(0.05)
        lateSplatter.SetScaleFactor(0.05)
        lateSurface = vtk.vtkContourFilter()
        lateSurface.SetInputConnection(lateSplatter.GetOutputPort())
        lateSurface.SetValue(0, 0.01)
        lateMapper = vtk.vtkPolyDataMapper()
        lateMapper.SetInputConnection(lateSurface.GetOutputPort())
        lateMapper.ScalarVisibilityOff()
        lateActor = vtk.vtkActor()
        lateActor.SetMapper(lateMapper)
        lateActor.GetProperty().SetColor(1.0, 0.0, 0.0)

        # create axes, sized from the splatted volume's bounds
        popSplatter.Update()
        bounds = popSplatter.GetOutput().GetBounds()
        axes = vtk.vtkAxes()
        axes.SetOrigin(bounds[0], bounds[2], bounds[4])
        axes.SetScaleFactor(popSplatter.GetOutput().GetLength() / 5.0)
        axesTubes = vtk.vtkTubeFilter()
        axesTubes.SetInputConnection(axes.GetOutputPort())
        axesTubes.SetRadius(axes.GetScaleFactor() / 25.0)
        axesTubes.SetNumberOfSides(6)
        axesMapper = vtk.vtkPolyDataMapper()
        axesMapper.SetInputConnection(axesTubes.GetOutputPort())
        axesActor = vtk.vtkActor()
        axesActor.SetMapper(axesMapper)

        # label the axes (followers always face the camera)
        XText = vtk.vtkVectorText()
        XText.SetText(xAxis)
        XTextMapper = vtk.vtkPolyDataMapper()
        XTextMapper.SetInputConnection(XText.GetOutputPort())
        XActor = vtk.vtkFollower()
        XActor.SetMapper(XTextMapper)
        XActor.SetScale(0.02, .02, .02)
        XActor.SetPosition(0.35, -0.05, -0.05)
        XActor.GetProperty().SetColor(0, 0, 0)

        YText = vtk.vtkVectorText()
        YText.SetText(yAxis)
        YTextMapper = vtk.vtkPolyDataMapper()
        YTextMapper.SetInputConnection(YText.GetOutputPort())
        YActor = vtk.vtkFollower()
        YActor.SetMapper(YTextMapper)
        YActor.SetScale(0.02, .02, .02)
        YActor.SetPosition(-0.05, 0.35, -0.05)
        YActor.GetProperty().SetColor(0, 0, 0)

        ZText = vtk.vtkVectorText()
        ZText.SetText(zAxis)
        ZTextMapper = vtk.vtkPolyDataMapper()
        ZTextMapper.SetInputConnection(ZText.GetOutputPort())
        ZActor = vtk.vtkFollower()
        ZActor.SetMapper(ZTextMapper)
        ZActor.SetScale(0.02, .02, .02)
        ZActor.SetPosition(-0.05, -0.05, 0.35)
        ZActor.GetProperty().SetColor(0, 0, 0)

        # Graphics stuff
        #
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)
        renWin.SetWindowName("vtk(-, Field.Data")
        renWin.SetSize(300, 300)

        # Add the actors to the renderer, set the background and size
        #
        ren.AddActor(axesActor)
        ren.AddActor(lateActor)
        ren.AddActor(XActor)
        ren.AddActor(YActor)
        ren.AddActor(ZActor)
        ren.AddActor(popActor)  # it's last because its translucent)
        ren.SetBackground(1, 1, 1)

        camera = vtk.vtkCamera()
        camera.SetClippingRange(.274, 13.72)
        camera.SetFocalPoint(0.433816, 0.333131, 0.449)
        camera.SetPosition(-1.96987, 1.15145, 1.49053)
        camera.SetViewUp(0.378927, 0.911821, 0.158107)
        ren.SetActiveCamera(camera)
        XActor.SetCamera(camera)
        YActor.SetCamera(camera)
        ZActor.SetCamera(camera)

        # render and interact with data
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
        renWin.Render()

        img_file = "financialField3.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
# Run the regression test through VTK's testing harness.
if __name__ == "__main__":
    vtk.test.Testing.main([(financialField3, 'test')])
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Core/Testing/Python/financialField3.py
|
Python
|
bsd-3-clause
| 10,952
|
[
"VTK"
] |
fd0120f0c6cf1d47c11b4a65a6b6edfefbb94808d9423db29568440ef503b192
|
import numpy as np
import moose
print( '[INFO] Using moose from %s' % moose.__file__ )
import time
# Does not guarantee thread determinism in multithreaded Gsolve/Ksolve.
moose.seed( 10 )
def printCompt(compt):
    """Print a one-line geometry summary of a cylindrical mesh compartment."""
    summary = 'x0=%s, x1=%s, diffLength=%s, numDiffCompt=%d' % (
        compt.x0, compt.x1, compt.diffLength, compt.numDiffCompts)
    print(summary)
def test_gsolve_paralllel(nT=4):
    """
    This example implements a reaction-diffusion like system which is
    bistable and propagates losslessly. It is based on the NEURON example
    rxdrun.py, but incorporates more compartments and runs for a longer time.
    The system is implemented in a function rather than as a proper system
    of chemical reactions. Please see rxdReacDiffusion.py for a variant that
    uses a reaction plus a function object to control its rates.

    nT: number of threads handed to the stochastic Gsolve solver.
    """
    print( 'Using %d threads' % nT )
    dt = 0.1
    # define the geometry
    compt = moose.CylMesh( '/cylinder' )
    compt.r0 = compt.r1 = 100e-9
    compt.x1 = 200e-09
    compt.diffLength = 0.2e-9
    # NOTE(review): float equality check — holds for these exact constants
    # (1000 compartments) but is fragile if the geometry values change.
    assert( compt.numDiffCompts == compt.x1/compt.diffLength)
    #define the molecule. Its geometry is defined by its parent volume, cylinder
    c = moose.Pool( '/cylinder/pool' )
    c.diffConst = 1e-13 # define diffusion constant
    # Here we set up a function calculation
    func = moose.Function( '/cylinder/pool/func' )
    func.expr = "(-x0*(30e-9-x0)*(100e-9-x0))*0.0001"
    # func.x.num = 1 #specify number of input variables.
    #Connect the molecules to the func
    moose.connect( c, 'nOut', func.x[0], 'input' )
    #Connect the function to the pool
    moose.connect( func, 'valueOut', c, 'increment' )
    #Set up solvers
    ksolve = moose.Gsolve( '/cylinder/Gsolve' )
    ksolve.numThreads = nT
    dsolve = moose.Dsolve( '/cylinder/dsolve' )
    stoich = moose.Stoich( '/cylinder/stoich' )
    stoich.compartment = compt
    stoich.ksolve = ksolve
    stoich.dsolve = dsolve
    stoich.path = '/cylinder/##'
    # initialize: 1000 molecules in every diffusion compartment
    x = np.arange( 0, compt.x1, compt.diffLength )
    c.vec.nInit = [ 1000.0 for q in x ]
    # Run and plot it.
    moose.reinit()
    updateDt = 50
    runtime = updateDt * 10
    t1 = time.time()
    res = []
    clk = moose.element( '/clock' )
    # sample (mean, max, min, sum) of molecule counts every updateDt seconds
    for t in range( 0, runtime-1, updateDt ):
        y = c.vec.n
        s = np.sum(y)
        v = (np.mean(y), np.max(y), np.min(y), s)
        print(v)
        res.append(v)
        moose.start( updateDt )
        currTime = clk.currentTime
    # One molecule here and there because thread launching has undeterministic
    # characteristics. Even after setting moose.seed; we may not see same
    # numbers on all platfroms.
    expected = [
        (1000.0, 1000.0, 1000.0, 1000000.0)
        , (9.908, 10.0, 8.0, 9908.0)
        , (6.869, 7.0, 6.0, 6869.0)
        , (5.354, 6.0, 5.0, 5354.0)
        , (4.562, 5.0, 4.0, 4562.0)
        , (3.483, 4.0, 3.0, 3483.0)
        , (3.043, 4.0, 3.0, 3043.0)
        , (2.261, 3.0, 2.0, 2261.0)
        , (1.967, 2.0, 1.0, 1967.0)
        , (1.997, 2.0, 1.0, 1997.0) ]
    print("Time = ", time.time() - t1)
    # loose tolerances (atol=1, rtol=1) absorb stochastic thread-level jitter
    assert np.isclose(res, expected, atol=1, rtol=1).all(), "Got %s, expected %s" % (res, expected)
def main(nT):
    """Entry point: run the parallel Gsolve regression test with nT threads."""
    test_gsolve_paralllel(nT)
if __name__ == '__main__':
    import sys
    import multiprocessing
    # Default thread count: one per CPU core (at least 1).
    nT = max(1, int(multiprocessing.cpu_count()))
    # Optional first CLI argument overrides the thread count.
    if len(sys.argv) > 1:
        nT = int(sys.argv[1])
    main( nT )
|
dilawar/moose-core
|
tests/core/test_gsolve_parallel.py
|
Python
|
gpl-3.0
| 3,474
|
[
"MOOSE",
"NEURON"
] |
595ec6badafaa4d3b362a26f57d1e6af6b80861a64b8ae4416b653ff3ce8a018
|
import numpy as np
import mbuild as mb
class Betacristobalite(mb.Compound):
    """The beta-cristobalite form of SiO2.

    Area per port specifies the density of attachment sites in nm^2.
    The crystal is expanded to yield an area per port of 0.25 nm^2, the
    typical density of alkane monolayers on SiO2 although these are actually
    grown on amorphous SiO2 in experiment.

    See http://www.wikiwand.com/en/Silicon_dioxide for more info on the various
    crystal forms.

    Note: Port sites are currently naively determined by placing them on all
    oxygens which are above 1.0 nm in the z-direction. This only holds true for
    the beta-cristobalite-expanded.mol2 file. If you add a new one, please modify
    the file or the method of determining port locations.
    """
    def __init__(self):
        super(Betacristobalite, self).__init__()

        # Load the expanded crystal structure shipped alongside this module.
        mb.load('beta-cristobalite-expanded.mol2', compound=self,
                relative_to_module=self.__module__)
        # Periodic box dimensions; zero in z means non-periodic along z.
        self.periodicity = np.array([5.3888, 4.6669, 0.0])

        count = 0
        for particle in self.particles():
            # Naive port placement: every oxygen above z = 1.0 gets a port
            # (valid only for the expanded .mol2 file — see class docstring).
            if particle.name == 'O' and particle.pos[2] > 1.0:
                count += 1
                port = mb.Port(anchor=particle)
                mb.rotate_around_x(port, np.pi/2)
                # Offset the port slightly above the anchoring oxygen.
                mb.translate(port, particle.pos + np.array([0, 0, .1]))
                self.add(port, 'port_{}'.format(count))
# Quick visual smoke test: tile the surface 2x1x1 and show the ports.
if __name__ == "__main__":
    single = Betacristobalite()
    multiple = mb.TiledCompound(single, n_tiles=(2, 1, 1), name="tiled")
    multiple.visualize(show_ports=True)
|
Jonestj1/mbuild
|
mbuild/lib/surfaces/betacristobalite.py
|
Python
|
mit
| 1,592
|
[
"CRYSTAL"
] |
376c7157e3ddf67719c92ee344d09982d6dcf2582886a408b368f1e5a2e68a9e
|
###
### This script retrieves the coverage profiles of RNA-seq and Ribo-seq for all ribosomal protein genes. It stores it as text files.
###
import sys,numpy,HTSeq
import multiprocessing,multiprocessing.pool
def analysis(genomicFeature):
    '''
    This function computes the histograms of reads across transcript lengths.

    genomicFeature: an operon name (key of the module-global riboOperons) or
    a single gene ID found in the module-global annotationObject. Writes one
    coverage text file per (timepoint, replicate, experiment) combination
    into coverageDir. Relies on the module globals riboOperons,
    annotationObject, bamFilesDir, coverageDir, timepoints, replicates,
    experiments and margin.
    '''
    print('\t computing coverage for {}...'.format(genomicFeature))
    # f.1 define window of coverage depending if it's an operon or a gene
    print('\t\t computing window...')
    if genomicFeature in riboOperons.keys(): # work with operons
        print(genomicFeature)
        # obtain the relevant features: collect coordinates of every member gene
        contigs=[]; starts=[]; ends=[]; strands=[]
        localGenes=riboOperons[genomicFeature]
        for feature in annotationObject:
            if feature.type == 'gene':
                strippedID=feature.attr['ID']
                if strippedID in localGenes:
                    contig=feature.iv.chrom
                    start=feature.iv.start+1  # GFF is 1-based; HTSeq ivs are 0-based
                    end=feature.iv.end
                    strand=feature.iv.strand
                    contigs.append(contig); starts.append(start); ends.append(end); strands.append(strand)
        # check consistency of strands: all operon members must share orientation
        if len(list(set(strands))) > 1:
            print('Detected gene in operon with different orientation. Exiting...')
            sys.exit()
        # define positions for coverage computing: operon span +/- margin
        contig=contigs[0]
        start=min(starts)
        end=max(ends)
        strand=strands[0]
        windowStart=start-margin
        windowEnd=end+margin+1
        windowP=HTSeq.GenomicInterval(contig,windowStart,windowEnd,"+")
        windowM=HTSeq.GenomicInterval(contig,windowStart,windowEnd,"-")
    else: # work with genes
        # scan the annotation until the matching gene feature is found;
        # 'feature' then holds that gene after the break
        for feature in annotationObject:
            if feature.type == 'gene':
                strippedID=feature.attr['ID']
                if strippedID == genomicFeature:
                    break
        # define positions for coverage computing: gene span +/- margin
        contig=feature.iv.chrom
        start=feature.iv.start+1
        end=feature.iv.end
        strand=feature.iv.strand
        windowStart=start-margin
        windowEnd=end+margin+1
        windowP=HTSeq.GenomicInterval(contig,windowStart,windowEnd,"+")
        windowM=HTSeq.GenomicInterval(contig,windowStart,windowEnd,"-")
    # f.2. compute coverage based on window
    print('\t\t computing coverage...')
    # NOTE(review): a single GenomicArray accumulates across ALL
    # timepoint/replicate/experiment BAM files, so each written profile
    # includes counts from every previously processed file — confirm this
    # cumulative behaviour is intended (vs. resetting per BAM).
    coverage=HTSeq.GenomicArray("auto",stranded=True,typecode="i")
    for timepoint in timepoints:
        for replicate in replicates:
            for experiment in experiments:
                # f.1. define the bam file
                bamFile=bamFilesDir+'{}.{}.{}/Aligned.sortedByCoord.out.bam'.format(experiment,replicate,timepoint)
                # f.2. read BAM file
                sortedBAMfile=HTSeq.BAM_Reader(bamFile)
                for alignment in sortedBAMfile:
                    if alignment.aligned:
                        coverage[ alignment.iv ] += 1
                # f.3. compute coverage on both strands of the window
                profileP=list(coverage[windowP])
                profileM=list(coverage[windowM])
                # f.4. define genomic positions with respect to strands
                loc=numpy.arange(windowStart,windowEnd)
                if strand == '+':
                    pos=loc
                elif strand == '-':
                    pos=loc[::-1]  # reverse so positions run 5'->3'
                else:
                    print('error at strand selection')
                    sys.exit()
                # f.5. writing a file: header lines then per-position counts
                fileName='{}{}.{}.{}.{}.txt'.format(coverageDir,timepoint,replicate,genomicFeature,experiment)
                f=open(fileName,'w')
                f.write('# name {}\n'.format(genomicFeature))
                f.write('# timepoint {}\n'.format(timepoint))
                f.write('# replicate {}\n'.format(replicate))
                f.write('# strand {}\n'.format(strand))
                f.write('# experiment {}\n'.format(experiment))
                f.write('# sumP,sumM {},{}\n'.format(sum(profileP),sum(profileM)))
                f.write('# location \t counts on strand plus \t counts on strand minus\n')
                for i in range(len(pos)):
                    f.write('{}\t{}\t{}\n'.format(pos[i],profileP[i],profileM[i]))
                f.close()
    return None
def dataReader():
    '''
    Read the ribosomal-protein operon definitions and the list of
    non-operon ribo-pt genes (NORPGs) from the microbesOnline files,
    print a retrieval summary and return (operonPredictions, NORPGs).
    '''
    # f.1. ribo-pt gene operons: operon name -> list of member gene IDs
    operonPredictions = {}
    with open(operonPredictionsDir + 'riboPtOperons.txt', 'r') as f:
        next(f)  # skip header line
        for line in f:
            fields = line.split('\t')
            operonPredictions[fields[0]] = [
                field.replace('\n', '') for field in fields[1:]]
    # f.2. non-operon ribo-pt genes
    NORPGs = []
    with open(operonPredictionsDir + 'NORPGs.txt', 'r') as f:
        next(f)  # skip header line
        for line in f:
            NORPGs.append(line.split('\t')[0].replace('\n', ''))
    # f.3. print information about retrieval (unique genes across operons)
    uniqueGenes = []
    for members in operonPredictions.values():
        for geneName in members:
            if geneName not in uniqueGenes:
                uniqueGenes.append(geneName)
    print('\t Recovered {} genes in {} operons.'.format(len(uniqueGenes), len(operonPredictions)))
    print('\t Recovered {} genes not in operons.'.format(len(NORPGs)))
    for geneName in NORPGs:
        if geneName not in uniqueGenes:
            uniqueGenes.append(geneName)
    print('\t Total genes recovered: {}'.format(len(uniqueGenes)))
    return operonPredictions, NORPGs
###
### MAIN
###

# 0. user defined variables (absolute paths to input/output locations)
bamFilesDir='/Volumes/omics4tb/alomana/projects/TLR/data/BAM/'
annotationFile='/Volumes/omics4tb/alomana/projects/TLR/data/genome/alo.build.NC002607.NC001869.NC002608.gff3'
coverageDir='/Volumes/omics4tb/alomana/projects/TLR/data/coverage/'
operonPredictionsDir='/Volumes/omics4tb/alomana/projects/TLR/data/microbesOnline/'
timepoints=['tp.1','tp.2','tp.3','tp.4']
replicates=['rep.1','rep.2','rep.3']
experiments=['rbf','trna']
margin=100 # excess of base pairs

# 1. read data
print('Reading data...')
riboOperons,NORPGs=dataReader()

# 2. iterate analysis over ribosomal proteins
print('Performing analysis...')
# 2.1. read annotation file
annotationObject=HTSeq.GFF_Reader(annotationFile)
# 2.2. selecting appropriate genomic locations
genomicFeatures=list(riboOperons.keys())+NORPGs
genomicFeatures.sort()
# NOTE(review): the next line discards the full sorted list and restricts
# the run to a single feature — looks like a leftover debugging override;
# remove it to process all operons/genes.
genomicFeatures=['gene-VNG_RS06605']
# 2.3.a. iterate over genomicFeatures in a parallel manner
# (one worker process per feature)
numberOfThreads=len(genomicFeatures)
print('Initialized parallel analysis using {} threads...'.format(numberOfThreads))
hydra=multiprocessing.pool.Pool(numberOfThreads)
tempo=hydra.map(analysis,genomicFeatures)
print('... completed.')
# 2.3.b. iterate over genomicFeatures single-thread
#for genomicFeature in genomicFeatures:
#    analysis(genomicFeature)
|
adelomana/30sols
|
SI/extra/coverage/profiler.py
|
Python
|
gpl-3.0
| 7,122
|
[
"HTSeq"
] |
5e501b2d4952989cb66379a2326f676cf3ec5bd5440f3b4309925bbfc2bd967b
|
from dateutil import parser
from hs_core.hydroshare.utils import get_resource_file_name_and_extension
from hs_file_types.models import GeoRasterLogicalFile, GeoRasterFileMetaData, GenericLogicalFile, \
NetCDFLogicalFile, GeoFeatureLogicalFile, GeoFeatureFileMetaData, RefTimeseriesLogicalFile, \
TimeSeriesLogicalFile, TimeSeriesFileMetaData
def assert_raster_file_type_metadata(self):
    """Assert resource-file layout and extracted metadata after setting the
    GeoRaster file type on the 'small_logan' tif (shared test helper; *self*
    is the calling TestCase with a ``composite_resource`` attribute).
    """
    # test the resource now has 2 files (vrt file added as part of metadata extraction)
    self.assertEqual(self.composite_resource.files.all().count(), 2)

    # check that the 2 resource files are now associated with GeoRasterLogicalFile
    for res_file in self.composite_resource.files.all():
        self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
        self.assertEqual(res_file.has_logical_file, True)
        self.assertTrue(isinstance(res_file.logical_file, GeoRasterLogicalFile))

    # check that we put the 2 files in a new folder (small_logan)
    for res_file in self.composite_resource.files.all():
        file_path, base_file_name, _ = get_resource_file_name_and_extension(res_file)
        expected_file_path = "{}/data/contents/small_logan/{}"
        expected_file_path = expected_file_path.format(self.composite_resource.root_path,
                                                       base_file_name)
        self.assertEqual(file_path, expected_file_path)

    # check that there is no GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 0)
    # check that there is one GeoRasterLogicalFile object
    self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)

    res_file = self.composite_resource.files.first()
    # check that the logicalfile is associated with 2 files
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.dataset_name, 'small_logan')
    self.assertEqual(logical_file.has_metadata, True)
    self.assertEqual(logical_file.files.all().count(), 2)
    self.assertEqual(set(self.composite_resource.files.all()),
                     set(logical_file.files.all()))

    # test that size property of the logical file is equal to sun of size of all files
    # that are part of the logical file
    self.assertEqual(logical_file.size, sum([f.size for f in logical_file.files.all()]))

    # test that there should be 1 object of type GeoRasterFileMetaData
    self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)

    # test that the metadata associated with logical file id of type GeoRasterFileMetaData
    self.assertTrue(isinstance(logical_file.metadata, GeoRasterFileMetaData))

    # there should be 2 format elements associated with resource
    self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
    self.assertEqual(
        self.composite_resource.metadata.formats.all().filter(value='application/vrt').count(),
        1)
    self.assertEqual(self.composite_resource.metadata.formats.all().filter(
        value='image/tiff').count(), 1)

    # test extracted metadata for the file type
    # geo raster file type should have all the metadata elements
    self.assertEqual(logical_file.metadata.has_all_required_elements(), True)

    # there should be 1 coverage element - box type
    self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
    self.assertEqual(logical_file.metadata.spatial_coverage.type, 'box')

    box_coverage = logical_file.metadata.spatial_coverage
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 42.0500269597691)
    self.assertEqual(box_coverage.value['eastlimit'], -111.57773718106195)
    self.assertEqual(box_coverage.value['southlimit'], 41.98722286029891)
    self.assertEqual(box_coverage.value['westlimit'], -111.69756293084055)

    # testing extended metadata element: original coverage
    ori_coverage = logical_file.metadata.originalCoverage
    self.assertNotEqual(ori_coverage, None)
    self.assertEqual(ori_coverage.value['northlimit'], 4655492.446916306)
    self.assertEqual(ori_coverage.value['eastlimit'], 452144.01909127034)
    self.assertEqual(ori_coverage.value['southlimit'], 4648592.446916306)
    self.assertEqual(ori_coverage.value['westlimit'], 442274.01909127034)
    self.assertEqual(ori_coverage.value['units'], 'meter')
    self.assertEqual(ori_coverage.value['projection'],
                     'NAD83 / UTM zone 12N')

    # testing extended metadata element: cell information
    cell_info = logical_file.metadata.cellInformation
    self.assertEqual(cell_info.rows, 230)
    self.assertEqual(cell_info.columns, 329)
    self.assertEqual(cell_info.cellSizeXValue, 30.0)
    self.assertEqual(cell_info.cellSizeYValue, 30.0)
    self.assertEqual(cell_info.cellDataType, 'Float32')

    # testing extended metadata element: band information
    self.assertEqual(logical_file.metadata.bandInformations.count(), 1)
    band_info = logical_file.metadata.bandInformations.first()
    self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
    self.assertEqual(band_info.maximumValue, '2880.00708008')
    self.assertEqual(band_info.minimumValue, '1870.63659668')
def assert_netcdf_file_type_metadata(self, title):
    """Assert resource-file layout and extracted metadata after setting the
    NetCDF file type on the 'netcdf_valid' file (shared test helper; *self*
    is the calling TestCase).

    :param title: expected resource-level title after extraction.

    All ``assertNotEquals`` calls were replaced with ``assertNotEqual``:
    the plural alias is deprecated and removed in Python 3.12.
    """
    # check that there is one NetCDFLogicalFile object
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    # check that there is no GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 0)

    # There should be now 2 files
    self.assertEqual(self.composite_resource.files.count(), 2)
    # check that we put the 2 files in a new folder (netcdf_valid)
    for res_file in self.composite_resource.files.all():
        file_path, base_file_name = res_file.full_path, res_file.file_name
        expected_file_path = u"{}/data/contents/netcdf_valid/{}"
        expected_file_path = expected_file_path.format(self.composite_resource.root_path,
                                                       base_file_name)
        self.assertEqual(file_path, expected_file_path)

    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # logical file should be associated with 2 files
    self.assertEqual(logical_file.files.all().count(), 2)
    file_extensions = set([f.extension for f in logical_file.files.all()])
    self.assertIn('.nc', file_extensions)
    self.assertIn('.txt', file_extensions)

    # test extracted netcdf file type metadata
    # there should 2 content file
    self.assertEqual(self.composite_resource.files.all().count(), 2)

    # test core metadata after metadata extraction
    # title = "Test NetCDF File Type Metadata"
    self.assertEqual(self.composite_resource.metadata.title.value, title)

    # there should be an abstract element
    self.assertNotEqual(self.composite_resource.metadata.description, None)
    extracted_abstract = "This netCDF data is the simulation output from Utah Energy " \
                         "Balance (UEB) model.It includes the simulation result " \
                         "of snow water equivalent during the period " \
                         "Oct. 2009 to June 2010 for TWDEF site in Utah."
    self.assertEqual(self.composite_resource.metadata.description.abstract, extracted_abstract)

    # there should be no source element
    self.assertEqual(self.composite_resource.metadata.sources.all().count(), 0)

    # there should be one license element:
    # NOTE(review): comparing the rights statement to the integer 1 can never
    # fail; this check looks wrong — probably meant to assert the statement
    # is not empty/None. Kept behavior, only modernized the alias.
    self.assertNotEqual(self.composite_resource.metadata.rights.statement, 1)

    # there should be no relation element
    self.assertEqual(self.composite_resource.metadata.relations.all().count(), 0)

    # there should be 2 creator
    self.assertEqual(self.composite_resource.metadata.creators.all().count(), 2)

    # there should be one contributor
    self.assertEqual(self.composite_resource.metadata.contributors.all().count(), 1)

    # there should be 2 coverage element - box type and period type
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(self.composite_resource.metadata.coverages.all().filter(type='box').
                     count(), 1)
    self.assertEqual(self.composite_resource.metadata.coverages.all().filter(type='period').
                     count(), 1)

    box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 41.867126409)
    self.assertEqual(box_coverage.value['eastlimit'], -111.505940368)
    self.assertEqual(box_coverage.value['southlimit'], 41.8639080745)
    self.assertEqual(box_coverage.value['westlimit'], -111.51138808)

    temporal_coverage = self.composite_resource.metadata.coverages.all().filter(
        type='period').first()
    self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                     parser.parse('10/01/2009').date())
    self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                     parser.parse('05/30/2010').date())

    # there should be 2 format elements
    self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
    self.assertEqual(self.composite_resource.metadata.formats.all().
                     filter(value='text/plain').count(), 1)
    self.assertEqual(self.composite_resource.metadata.formats.all().
                     filter(value='application/x-netcdf').count(), 1)

    # test file type metadata
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # there should be one keyword element
    self.assertEqual(len(logical_file.metadata.keywords), 1)
    self.assertIn('Snow water equivalent', logical_file.metadata.keywords)

    # test dataset_name attribute of the logical file which shoould have the extracted value
    dataset_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
    self.assertEqual(logical_file.dataset_name, dataset_title)

    # testing extended metadata element: original coverage
    ori_coverage = logical_file.metadata.originalCoverage
    self.assertNotEqual(ori_coverage, None)
    self.assertEqual(ori_coverage.projection_string_type, 'Proj4 String')
    proj_text = u'+proj=tmerc +y_0=0.0 +k_0=0.9996 +x_0=500000.0 +lat_0=0.0 +lon_0=-111.0'
    self.assertEqual(ori_coverage.projection_string_text, proj_text)
    self.assertEqual(ori_coverage.value['northlimit'], '4.63515e+06')
    self.assertEqual(ori_coverage.value['eastlimit'], '458010.0')
    self.assertEqual(ori_coverage.value['southlimit'], '4.63479e+06')
    self.assertEqual(ori_coverage.value['westlimit'], '457560.0')
    self.assertEqual(ori_coverage.value['units'], 'Meter')
    self.assertEqual(ori_coverage.value['projection'], 'transverse_mercator')

    # testing extended metadata element: variables
    self.assertEqual(logical_file.metadata.variables.all().count(), 5)

    # test time variable
    var_time = logical_file.metadata.variables.all().filter(name='time').first()
    self.assertNotEqual(var_time, None)
    self.assertEqual(var_time.unit, 'hours since 2009-10-1 0:0:00 UTC')
    self.assertEqual(var_time.type, 'Float')
    self.assertEqual(var_time.shape, 'time')
    self.assertEqual(var_time.descriptive_name, 'time')

    # test x variable
    var_x = logical_file.metadata.variables.all().filter(name='x').first()
    self.assertNotEqual(var_x, None)
    self.assertEqual(var_x.unit, 'Meter')
    self.assertEqual(var_x.type, 'Float')
    self.assertEqual(var_x.shape, 'x')
    self.assertEqual(var_x.descriptive_name, 'x coordinate of projection')

    # test y variable
    var_y = logical_file.metadata.variables.all().filter(name='y').first()
    self.assertNotEqual(var_y, None)
    self.assertEqual(var_y.unit, 'Meter')
    self.assertEqual(var_y.type, 'Float')
    self.assertEqual(var_y.shape, 'y')
    self.assertEqual(var_y.descriptive_name, 'y coordinate of projection')

    # test SWE variable
    var_swe = logical_file.metadata.variables.all().filter(name='SWE').first()
    self.assertNotEqual(var_swe, None)
    self.assertEqual(var_swe.unit, 'm')
    self.assertEqual(var_swe.type, 'Float')
    self.assertEqual(var_swe.shape, 'y,x,time')
    self.assertEqual(var_swe.descriptive_name, 'Snow water equivalent')
    self.assertEqual(var_swe.method, 'model simulation of UEB model')
    self.assertEqual(var_swe.missing_value, '-9999')

    # test grid mapping variable
    var_grid = logical_file.metadata.variables.all(). \
        filter(name='transverse_mercator').first()
    self.assertNotEqual(var_grid, None)
    self.assertEqual(var_grid.unit, 'Unknown')
    self.assertEqual(var_grid.type, 'Unknown')
    self.assertEqual(var_grid.shape, 'Not defined')
def assert_geofeature_file_type_metadata(self, expected_folder_name):
    """Assert resource-file layout and extracted metadata after setting the
    GeoFeature (shapefile) file type (shared test helper; *self* is the
    calling TestCase).

    :param expected_folder_name: folder the 3 shapefile component files are
        expected to have been moved into.
    """
    # test files in the file type
    self.assertEqual(self.composite_resource.files.count(), 3)
    # check that there is no GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 0)
    # check that there is one GeoFeatureLogicalFile object
    self.assertEqual(GeoFeatureLogicalFile.objects.count(), 1)
    # check that there is one GeoFeatureFileMetaData object
    self.assertEqual(GeoFeatureFileMetaData.objects.count(), 1)

    logical_file = GeoFeatureLogicalFile.objects.first()
    self.assertEqual(logical_file.files.count(), 3)

    # check that the 3 resource files are now associated with GeoFeatureLogicalFile
    for res_file in self.composite_resource.files.all():
        self.assertEqual(res_file.logical_file_type_name, "GeoFeatureLogicalFile")
        self.assertEqual(res_file.has_logical_file, True)
        self.assertTrue(isinstance(res_file.logical_file, GeoFeatureLogicalFile))

    # check that we put the 3 files in a new folder
    for res_file in self.composite_resource.files.all():
        file_path, base_file_name, _ = get_resource_file_name_and_extension(res_file)
        expected_file_path = "{}/data/contents/{}/{}"
        # BUG FIX: the original ASSIGNED here
        # (``res_file.file_folder = expected_folder_name``), which silently
        # disabled the folder check — assert the stored folder instead.
        self.assertEqual(res_file.file_folder, expected_folder_name)
        expected_file_path = expected_file_path.format(self.composite_resource.root_path,
                                                       expected_folder_name, base_file_name)
        self.assertEqual(file_path, expected_file_path)

    # test extracted raster file type metadata
    # there should not be any resource level coverage
    self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
    self.assertNotEqual(logical_file.metadata.geometryinformation, None)
    self.assertEqual(logical_file.metadata.geometryinformation.featureCount, 51)
    self.assertEqual(logical_file.metadata.geometryinformation.geometryType,
                     "MULTIPOLYGON")
    self.assertNotEqual(logical_file.metadata.originalcoverage, None)
    self.assertEqual(logical_file.metadata.originalcoverage.datum,
                     'unknown')
    self.assertEqual(logical_file.metadata.originalcoverage.projection_name,
                     'unknown')
    self.assertGreater(len(logical_file.metadata.originalcoverage.projection_string), 0)
    self.assertEqual(logical_file.metadata.originalcoverage.unit, 'unknown')
    self.assertEqual(logical_file.metadata.originalcoverage.eastlimit, -66.9692712587578)
    self.assertEqual(logical_file.metadata.originalcoverage.northlimit, 71.406235393967)
    self.assertEqual(logical_file.metadata.originalcoverage.southlimit, 18.921786345087)
    self.assertEqual(logical_file.metadata.originalcoverage.westlimit, -178.217598362366)
    def assert_ref_time_series_file_type_metadata(self):
        """Assert extracted metadata for the RefTimeseries file type.

        Verifies that exactly one RefTimeseriesLogicalFile exists, that
        resource-level metadata (title, abstract, keywords, spatial and
        temporal coverage) was updated from the .refts.json file, and that
        file-level metadata (coverages, sites, variables, web services)
        matches the values declared in that json file.
        """
        # check that there is one RefTimeseriesLogicalFile object
        self.assertEqual(RefTimeseriesLogicalFile.objects.count(), 1)
        # test extracted metadata that updates resource level metadata
        # resource title should have been updated from the title value in json file
        res_title = "Sites, Variable"
        self.composite_resource.metadata.refresh_from_db()
        self.assertEqual(self.composite_resource.metadata.title.value, res_title)
        # resource abstract should have been updated from the abstract value in json file
        abstract = "Discharge, cubic feet per second,Blue-green algae (cyanobacteria), " \
                   "phycocyanin data collected from 2016-04-06 to 2017-02-09 created on " \
                   "Thu Apr 06 2017 09:15:56 GMT-0600 (Mountain Daylight Time) from the " \
                   "following site(s): HOBBLE CREEK AT 1650 WEST AT SPRINGVILLE, UTAH, and " \
                   "Provo River at Charleston Advanced Aquatic. Data created by " \
                   "CUAHSI HydroClient: http://data.cuahsi.org/#."
        self.assertEqual(self.composite_resource.metadata.description.abstract, abstract)
        # test keywords - resource level keywords should have been updated with data from the json
        # file
        keywords = [kw.value for kw in self.composite_resource.metadata.subjects.all()]
        for kw in keywords:
            self.assertIn(kw, ["Time Series", "CUAHSI"])
        # test coverage metadata (resource level uses a known projection)
        box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 40.48498)
        self.assertEqual(box_coverage.value['eastlimit'], -111.46245)
        self.assertEqual(box_coverage.value['southlimit'], 40.1788719)
        self.assertEqual(box_coverage.value['westlimit'], -111.639338)
        temporal_coverage = self.composite_resource.metadata.coverages.all().filter(
            type='period').first()
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('04/06/2016').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('02/09/2017').date())
        # test file level metadata
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(logical_file.dataset_name, res_title)
        for kw in logical_file.metadata.keywords:
            self.assertIn(kw, ["Time Series", "CUAHSI"])
        # file-level box coverage carries an 'Unknown' projection (unlike resource level)
        box_coverage = logical_file.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'Unknown')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 40.48498)
        self.assertEqual(box_coverage.value['eastlimit'], -111.46245)
        self.assertEqual(box_coverage.value['southlimit'], 40.1788719)
        self.assertEqual(box_coverage.value['westlimit'], -111.639338)
        temporal_coverage = logical_file.metadata.coverages.all().filter(
            type='period').first()
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('04/06/2016').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('02/09/2017').date())
        # file level abstract
        self.assertEqual(logical_file.metadata.abstract, abstract)
        # there should be 2 time series
        self.assertEqual(len(logical_file.metadata.time_series_list), 2)
        # test site related metadata
        self.assertEqual(len(logical_file.metadata.sites), 2)
        site_names = [site.name for site in logical_file.metadata.sites]
        self.assertIn("HOBBLE CREEK AT 1650 WEST AT SPRINGVILLE, UTAH", site_names)
        self.assertIn("Provo River at Charleston Advanced Aquatic", site_names)
        site_codes = [site.code for site in logical_file.metadata.sites]
        self.assertIn("NWISDV:10153100", site_codes)
        self.assertIn("Provo River GAMUT:PR_CH_AA", site_codes)
        site_lats = [site.latitude for site in logical_file.metadata.sites]
        self.assertIn(40.178871899999997, site_lats)
        self.assertIn(40.48498, site_lats)
        site_lons = [site.longitude for site in logical_file.metadata.sites]
        self.assertIn(-111.639338, site_lons)
        self.assertIn(-111.46245, site_lons)
        # there should be 2 variables
        self.assertEqual(len(logical_file.metadata.variables), 2)
        var_names = [var.name for var in logical_file.metadata.variables]
        self.assertIn("Discharge, cubic feet per second", var_names)
        self.assertIn("Blue-green algae (cyanobacteria), phycocyanin", var_names)
        var_codes = [var.code for var in logical_file.metadata.variables]
        self.assertIn("NWISDV:00060/DataType=MEAN", var_codes)
        self.assertIn("iutah:BGA", var_codes)
        # there should be 2 web services
        self.assertEqual(len(logical_file.metadata.web_services), 2)
        web_urls = [web.url for web in logical_file.metadata.web_services]
        self.assertIn("http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?WSDL", web_urls)
        self.assertIn("http://data.iutahepscor.org/ProvoRiverWOF/cuahsi_1_1.asmx?WSDL", web_urls)
        web_service_types = [web.service_type for web in logical_file.metadata.web_services]
        self.assertIn("SOAP", web_service_types)
        # both services share the same service type
        self.assertEqual(len(set(web_service_types)), 1)
        web_reference_types = [web.reference_type for web in logical_file.metadata.web_services]
        self.assertIn("WOF", web_reference_types)
        web_return_types = [web.return_type for web in logical_file.metadata.web_services]
        self.assertIn("WaterML 1.1", web_return_types)
    def assert_time_series_file_type_metadata(self):
        """Test timeseries file type metadata extraction.

        Verifies that exactly one TimeSeriesLogicalFile/TimeSeriesFileMetaData
        pair exists, that resource-level metadata (title, abstract, coverages,
        format, subjects) was populated from the ODM2 sqlite file, that the
        sqlite file was moved into its own folder, and that file-level
        metadata (sites, variables, methods, processing levels, timeseries
        results, and CV lookup tables) matches the sqlite contents.
        """
        # check that there is one TimeSeriesLogicalFile object
        self.assertEqual(TimeSeriesLogicalFile.objects.count(), 1)
        # check that there is one TimeSeriesFileMetaData object
        self.assertEqual(TimeSeriesFileMetaData.objects.count(), 1)
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertTrue(isinstance(logical_file.metadata, TimeSeriesFileMetaData))
        # test extracted metadata that updates resource level metadata
        # there should one content file - sqlite file
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        # there should be one contributor element
        self.assertEqual(self.composite_resource.metadata.contributors.all().count(), 1)
        # test core metadata after metadata extraction
        extracted_title = "Water temperature data from the Little Bear River, UT"
        self.assertEqual(self.composite_resource.metadata.title.value, extracted_title)
        # there should be an abstract element
        self.assertNotEqual(self.composite_resource.metadata.description, None)
        extracted_abstract = "This dataset contains time series of observations of water " \
                             "temperature in the Little Bear River, UT. Data were recorded every " \
                             "30 minutes. The values were recorded using a HydroLab MS5 " \
                             "multi-parameter water quality sonde connected to a Campbell " \
                             "Scientific datalogger."
        self.assertEqual(self.composite_resource.metadata.description.abstract.strip(),
                         extracted_abstract)
        # there should be 2 coverage element - box type and period type
        self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
        self.assertEqual(self.composite_resource.metadata.coverages.all().filter(type='box').count(), 1)
        self.assertEqual(self.composite_resource.metadata.coverages.all().filter(
            type='period').count(), 1)
        box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 41.718473)
        self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
        self.assertEqual(box_coverage.value['southlimit'], 41.495409)
        self.assertEqual(box_coverage.value['westlimit'], -111.946402)
        temporal_coverage = self.composite_resource.metadata.coverages.all().filter(
            type='period').first()
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('01/01/2008').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('01/31/2008').date())
        # there should be one format element
        self.assertEqual(self.composite_resource.metadata.formats.all().count(), 1)
        format_element = self.composite_resource.metadata.formats.all().first()
        self.assertEqual(format_element.value, 'application/sqlite')
        # there should be one subject element
        self.assertEqual(self.composite_resource.metadata.subjects.all().count(), 1)
        subj_element = self.composite_resource.metadata.subjects.all().first()
        self.assertEqual(subj_element.value, 'Temperature')
        # test that we put the sqlite file into a new directory
        res_file = self.composite_resource.files.first()
        file_path, base_file_name = res_file.full_path, res_file.file_name
        expected_file_path = u"{}/data/contents/ODM2_Multi_Site_One_Variable/{}"
        expected_file_path = expected_file_path.format(self.composite_resource.root_path,
                                                       base_file_name)
        self.assertEqual(file_path, expected_file_path)
        logical_file = res_file.logical_file
        # logical file should be associated with 1 file
        self.assertEqual(logical_file.files.all().count(), 1)
        res_file = logical_file.files.first()
        self.assertIn('.sqlite', res_file.extension)
        # test file level metadata extraction
        # there should be a total of 7 timeseries
        self.assertEqual(logical_file.metadata.time_series_results.all().count(), 7)
        # testing extended metadata elements
        # test title/dataset name
        self.assertEqual(logical_file.dataset_name, extracted_title)
        # test abstract
        self.assertEqual(logical_file.metadata.abstract, extracted_abstract)
        # there should be one keyword element
        self.assertEqual(len(logical_file.metadata.keywords), 1)
        self.assertIn('Temperature', logical_file.metadata.keywords)
        # test spatial coverage
        box_coverage = logical_file.metadata.spatial_coverage
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 41.718473)
        self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
        self.assertEqual(box_coverage.value['southlimit'], 41.495409)
        self.assertEqual(box_coverage.value['westlimit'], -111.946402)
        # test temporal coverage
        temporal_coverage = logical_file.metadata.temporal_coverage
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('01/01/2008').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('01/31/2008').date())
        # test 'site' - there should be 7 sites
        self.assertEqual(logical_file.metadata.sites.all().count(), 7)
        # each site be associated with one series id
        for site in logical_file.metadata.sites.all():
            self.assertEqual(len(site.series_ids), 1)
        # test the data for a specific site
        site = logical_file.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
        self.assertNotEqual(site, None)
        site_name = 'Little Bear River at McMurdy Hollow near Paradise, Utah'
        self.assertEqual(site.site_name, site_name)
        self.assertEqual(site.elevation_m, 1445)
        self.assertEqual(site.elevation_datum, 'NGVD29')
        self.assertEqual(site.site_type, 'Stream')
        self.assertEqual(site.latitude, 41.575552)
        self.assertEqual(site.longitude, -111.855217)
        # test 'variable' - there should be 1 variable element
        self.assertEqual(logical_file.metadata.variables.all().count(), 1)
        variable = logical_file.metadata.variables.all().first()
        # there should be 7 series ids associated with this one variable
        self.assertEqual(len(variable.series_ids), 7)
        # test the data for a variable
        self.assertEqual(variable.variable_code, 'USU36')
        self.assertEqual(variable.variable_name, 'Temperature')
        self.assertEqual(variable.variable_type, 'Water Quality')
        self.assertEqual(variable.no_data_value, -9999)
        self.assertEqual(variable.variable_definition, None)
        self.assertEqual(variable.speciation, 'Not Applicable')
        # test 'method' - there should be 1 method element
        self.assertEqual(logical_file.metadata.methods.all().count(), 1)
        method = logical_file.metadata.methods.all().first()
        # there should be 7 series ids associated with this one method element
        self.assertEqual(len(method.series_ids), 7)
        self.assertEqual(method.method_code, '28')
        method_name = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
                      'using ODM Tools.'
        self.assertEqual(method.method_name, method_name)
        self.assertEqual(method.method_type, 'Instrument deployment')
        method_des = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
                     'using ODM Tools.'
        self.assertEqual(method.method_description, method_des)
        self.assertEqual(method.method_link, None)
        # test 'processing_level' - there should be 1 processing_level element
        self.assertEqual(logical_file.metadata.processing_levels.all().count(), 1)
        proc_level = logical_file.metadata.processing_levels.all().first()
        # there should be 7 series ids associated with this one element
        self.assertEqual(len(proc_level.series_ids), 7)
        self.assertEqual(proc_level.processing_level_code, 1)
        self.assertEqual(proc_level.definition, 'Quality controlled data')
        explanation = 'Quality controlled data that have passed quality assurance procedures ' \
                      'such as routine estimation of timing and sensor calibration or visual ' \
                      'inspection and removal of obvious errors. An example is USGS published ' \
                      'streamflow records following parsing through USGS quality control ' \
                      'procedures.'
        self.assertEqual(proc_level.explanation, explanation)
        # test 'timeseries_result' - there should be 7 timeseries_result element
        self.assertEqual(logical_file.metadata.time_series_results.all().count(), 7)
        ts_result = logical_file.metadata.time_series_results.filter(
            series_ids__contains=['182d8fa3-1ebc-11e6-ad49-f45c8999816f']).first()
        self.assertNotEqual(ts_result, None)
        # there should be only 1 series id associated with this element
        self.assertEqual(len(ts_result.series_ids), 1)
        self.assertEqual(ts_result.units_type, 'Temperature')
        self.assertEqual(ts_result.units_name, 'degree celsius')
        self.assertEqual(ts_result.units_abbreviation, 'degC')
        self.assertEqual(ts_result.status, 'Unknown')
        self.assertEqual(ts_result.sample_medium, 'Surface Water')
        self.assertEqual(ts_result.value_count, 1441)
        self.assertEqual(ts_result.aggregation_statistics, 'Average')
        # test for CV lookup tables
        # there should be 23 CV_VariableType records
        self.assertEqual(logical_file.metadata.cv_variable_types.all().count(), 23)
        # there should be 805 CV_VariableName records
        self.assertEqual(logical_file.metadata.cv_variable_names.all().count(), 805)
        # there should be 145 CV_Speciation records
        self.assertEqual(logical_file.metadata.cv_speciations.all().count(), 145)
        # there should be 51 CV_SiteType records
        self.assertEqual(logical_file.metadata.cv_site_types.all().count(), 51)
        # there should be 5 CV_ElevationDatum records
        self.assertEqual(logical_file.metadata.cv_elevation_datums.all().count(), 5)
        # there should be 25 CV_MethodType records
        self.assertEqual(logical_file.metadata.cv_method_types.all().count(), 25)
        # there should be 179 CV_UnitsType records
        self.assertEqual(logical_file.metadata.cv_units_types.all().count(), 179)
        # there should be 4 CV_Status records
        self.assertEqual(logical_file.metadata.cv_statuses.all().count(), 4)
        # there should be 18 CV_Medium records (the assertion checks 18;
        # a previous comment here said 17, which did not match the code)
        self.assertEqual(logical_file.metadata.cv_mediums.all().count(), 18)
        # there should be 17 CV_aggregationStatistics records
        self.assertEqual(logical_file.metadata.cv_aggregation_statistics.all().count(), 17)
        # there should not be any UTCOffset element
        self.assertEqual(logical_file.metadata.utc_offset, None)
|
ResearchSoftwareInstitute/MyHPOM
|
hs_file_types/tests/utils.py
|
Python
|
bsd-3-clause
| 32,673
|
[
"NetCDF"
] |
355d70163dd7e721567578df69ee65b2ab38d81015519711d3e5bce6753e3068
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, re
from QueueManager import QueueManager
from TestHarness import util # to execute qsub
import math # to compute node requirement
## This Class is responsible for maintaining an interface to the PBS scheduling syntax
class RunPBS(QueueManager):
    """
    Interface between the TestHarness and the PBS scheduler.

    Renders a qsub submission script from a template, launches it with
    `qsub`, and polls `qstat` to detect jobs that failed or were killed
    outside of the TestHarness's control (e.g. walltime exceeded).
    """
    @staticmethod
    def validParams():
        params = QueueManager.validParams()
        params.addParam('queue_template', os.path.join(os.path.abspath(os.path.dirname(__file__)), 'pbs_template'), "Location of the PBS template")
        return params

    def __init__(self, harness, params):
        QueueManager.__init__(self, harness, params)
        self.params = params
        self.harness = harness
        self.options = self.harness.getOptions()

    def getBadKeyArgs(self):
        """ Arguments we need to remove from sys.argv. """
        return ['--pbs']

    def hasTimedOutOrFailed(self, job_data):
        """
        Use qstat and return True on job failures outside of the TestHarness's control.

        Returns False when the job exited cleanly, or when no launch ID is
        available to query (previously this fell through returning None).
        """
        launch_id = job_data.json_data.get(job_data.job_dir,
                                           {}).get(job_data.plugin,
                                                   {}).get('ID', "").split('.')[0]

        # We shouldn't run into a null, but just in case, lets handle it
        if launch_id:
            qstat_command_result = util.runCommand('qstat -xf %s' % (launch_id))

            # Handle a qstat execution failure for some reason
            if qstat_command_result.find('ERROR') != -1:
                # Set error for each job contained in group
                for job in job_data.jobs.getJobs():
                    job.setOutput('ERROR invoking `qstat`\n%s' % (qstat_command_result))
                    job.setStatus(job.error, 'QSTAT')
                return True

            qstat_job_result = re.findall(r'Exit_status = (\d+)', qstat_command_result)

            # This job was killed by PBS for exceeding walltime
            if qstat_job_result and qstat_job_result[0] == "271":
                for job in job_data.jobs.getJobs():
                    job.addCaveats('Killed by PBS Exceeded Walltime')
                return True

            # Capture TestHarness exceptions (any other non-zero exit status)
            elif qstat_job_result and qstat_job_result[0] != "0":
                # Try and gather some useful output we can tack on to one of the job objects
                output_file = job_data.json_data.get(job_data.job_dir, {}).get(job_data.plugin, {}).get('QSUB_OUTPUT', "")
                if os.path.exists(output_file):
                    with open(output_file, 'r') as f:
                        output_string = util.readOutput(f, None, job_data.jobs.getJobs()[0].getTester())
                    job_data.jobs.getJobs()[0].setOutput(output_string)

                # Add a caveat to each job, explaining that one of the jobs caused a TestHarness exception
                for job in job_data.jobs.getJobs():
                    job.addCaveats('TESTHARNESS EXCEPTION')
                return True

        # No launch ID, or a clean exit status: nothing failed
        return False

    def _augmentTemplate(self, job):
        """ Populate qsub script template with parameters derived from <job> and the harness options. """
        template = {}

        # Launch script location
        template['launch_script'] = os.path.join(job.getTestDir(), os.path.basename(job.getTestNameShort()) + '.qsub')

        # NCPUS
        template['mpi_procs'] = job.getMetaData().get('QUEUEING_NCPUS', 1)

        # Compute node requirement. Force true division so a partial node
        # rounds up to a whole node (integer division would under-allocate),
        # and normalize ceil's result to an int for clean template formatting.
        if self.options.pbs_node_cpus:
            nodes = float(template['mpi_procs']) / self.options.pbs_node_cpus
        else:
            nodes = 1
        template['nodes'] = int(math.ceil(nodes))

        # Convert MAX_TIME (seconds) to HH:MM:00 for walltime use
        max_time = job.getMetaData().get('QUEUEING_MAXTIME', 1)
        hours = int(int(max_time) / 3600)
        minutes = int(int(max_time) / 60) % 60
        template['walltime'] = '{0:02d}'.format(hours) + ':' + '{0:02d}'.format(minutes) + ':00'

        # Job Name
        template['job_name'] = os.path.basename(job.getTestNameShort())

        # PBS Project group
        template['pbs_project'] = '#PBS -P %s' % (self.options.queue_project)

        # PBS Queue (optional)
        if self.options.queue_queue:
            template['pbs_queue'] = '#PBS -q %s' % (self.options.queue_queue)
        else:
            template['pbs_queue'] = ''

        # Apply source command (only when the file actually exists)
        if self.options.queue_source_command and os.path.exists(self.options.queue_source_command):
            template['pre_command'] = 'source %s || exit 1' % (os.path.abspath(self.options.queue_source_command))
        else:
            template['pre_command'] = ''

        # Redirect stdout to this location
        template['output'] = os.path.join(job.getTestDir(), 'qsub.output')

        # Root directory
        template['working_dir'] = self.harness.base_dir

        # Command
        template['command'] = ' '.join(self.getRunTestsCommand(job))

        return template

    def run(self, job):
        """ Execute qsub and record the launch id in the job's metadata. """
        template = self._augmentTemplate(job)
        tester = job.getTester()
        self.createQueueScript(job, template)
        command = ' '.join(['qsub', template['launch_script']])
        launch_results = util.runCommand(command, job.getTestDir())

        # List of files we need to clean up when we are done
        dirty_files = [template['launch_script'],
                       template['output']]
        self.addDirtyFiles(job, dirty_files)

        if launch_results.find('ERROR') != -1:
            # The executor job failed (so fail all jobs in this group)
            job_dag = job.getDAG()
            for other_job in [x for x in job_dag.topological_sort() if x != job]:
                other_job.clearCaveats()
                other_tester = other_job.getTester()
                other_tester.setStatus(other_tester.fail, 'launch failure')

            # This is _only_ to make the failed message more useful
            tester.specs['command'] = command
            tester.setStatus(tester.fail, 'QSUB Group Failure')
            job.setOutput(launch_results)
        else:
            job.addMetaData(RunPBS={'ID' : launch_results,
                                    'QSUB_COMMAND' : command,
                                    'NCPUS' : template['mpi_procs'],
                                    'WALLTIME' : template['walltime'],
                                    'QSUB_OUTPUT' : template['output']})
            tester.setStatus(tester.queued, 'LAUNCHING')
|
nuclear-wizard/moose
|
python/TestHarness/schedulers/RunPBS.py
|
Python
|
lgpl-2.1
| 6,785
|
[
"MOOSE"
] |
60c0e90a8d9ab6b6cbe0df691b143fad3fecfc97c28582699127aba39f4e28c2
|
# coastlib, a coastal engineering Python library
# Copyright (C), 2019 Georgii Bocharov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pickle
import corner
import emcee
import matplotlib.pyplot as plt
import matplotlib.ticker
import mpmath
import numpy as np
import pandas as pd
import scipy.stats
import statsmodels.api as sm
import coastlib.math.derivatives
import coastlib.stats.distributions
def empty_quantile(array, *args, **kwargs):
    """Return ``np.nanquantile`` of *array*, or NaN when *array* is empty.

    Extra positional and keyword arguments are forwarded to
    ``np.nanquantile`` untouched.
    """
    if not len(array):
        return np.nan
    return np.nanquantile(array, *args, **kwargs)
class EVA:
"""
Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
Extracts extreme values. Provides assistance in threshold value selection for the POT method.
Estimates parameters of distributions for given data using Maximum Likelihood Estimate (MLE)
or estimates posterior distributions of parameters of distributions using Markov chain Monte Carlo (MCMC).
For given return periods gives estimates of return values and associated confidence intervals.
Generates various statistical plots such as return value plot and QQ/PP plots.
Provides multiple goodness-of-fit (GOF) statistics and tests.
Parameters
----------
dataframe : pd.DataFrame or pd.Series
Pandas Dataframe or Series object containing data to be analyzed.
Must have index array of type pd.DatetimeIndex.
column : str or int, optional
Name or index of column in <dataframe> with data to be analyzed.
By default is <None> and takes first (0'th index) column from <dataframe>.
block_size : float, optional
Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
Block size is used to estimate probabilities (return periods for observed data) for all methods
and to extract extreme events in the 'Block Maxima' method.
By default, it is one Gregorian year and results in return periods having units of years,
i.e. a 100-<block_size> event by default is a 100-year return period event.
Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Public Attributes
-----------------
self.__init__()
self.dataframe : pd.DataFrame
self.column : str
self.block_size : float
self.gap_length : float
self.number_of_blocks : float
self.dataframe_declustered : np.ndarray
self.get_extremes()
self.extremes_method : str
self.extremes_type : str
self.threshold : float
self.block_boundaries : np.ndarray
self.extremes : pd.DataFrame
self.extremes_rate : float
self.plotting_position : str
self.fit()
self.distribution_name : str
self.fit_method : str
self.fit_parameters : tuple
self.scipy_fit_options : dict
self.sampler : emcee.EnsembleSampler
self.mcmc_chain : np.ndarray
self.fixed_parameters : np.ndarray
self.generate_results()
self.results : pd.DataFrame
Private Attributes
------------------
self.__init__()
self.__status : dict
Public Methods
--------------
self.to_pickle
self.read_pickle
self.get_extremes
self.plot_extremes
self.plot_mean_residual_life
self.plot_parameter_stability
self.test_extremes
self.fit
self.plot_trace
self.plot_corner
self.plot_posterior
self.return_value
self.confidence_interval
self.generate_results
self.plot_summary
self.pdf
self.cdf
self.ppf
self.isf
self.plot_qq
self.goodness_of_fit
Private Methods
---------------
self.__init__
self.__get_blocks
self.__update
self.__repr__
self.__get_return_period
self.__run_mcmc
self._kernel_fit_parameters
self.__monte_carlo
self.__delta
self.__get_property
"""
    def __init__(self, dataframe, column=None, block_size=365.2425, gap_length=24):
        """
        Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
        Calculates number of blocks with <block_size>, accounting for gaps if <gap_length> is given.
        Parameters
        ----------
        dataframe : pd.DataFrame or pd.Series
            Pandas Dataframe or Series object containing data to be analyzed.
            Must have index array of type pd.DatetimeIndex.
        column : str or int, optional
            Name or index of column in <dataframe> with data to be analyzed.
            By default is <None> and takes first (0'th index) column from <dataframe>.
        block_size : float, optional
            Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
            Block size is used to estimate probabilities (return periods for observed data) for all methods
            and to extract extreme events in the 'Block Maxima' method.
            By default, it is one Gregorian year and results in return periods having units of years,
            i.e. a 100-<block_size> event by default is a 100-year return period event.
            Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
        gap_length : float, optional
            Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
            number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
            as "(last_date - first_date) / block_size". Default is 24 hours.
            It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
        """
        # Ensure passed <dataframe> is a pd.Dataframe object or can be converted to one
        if isinstance(dataframe, pd.DataFrame):
            self.dataframe = dataframe
        elif isinstance(dataframe, pd.Series):
            self.dataframe = dataframe.to_frame()
        else:
            raise TypeError(f'<dataframe> must be {pd.DataFrame} or {pd.Series}, {type(dataframe)} was passed')
        # Ensure <dataframe> index is pd.DatetimeIndex object
        if not isinstance(dataframe.index, pd.DatetimeIndex):
            raise TypeError(f'<dataframe> index must be {pd.DatetimeIndex}, {type(dataframe.index)} was passed')
        # NOTE(review): in-place sort mutates the object passed by the caller
        # when it was a DataFrame — confirm this side effect is intended
        self.dataframe.sort_index(ascending=True, inplace=True)
        # Ensure passed <column> represents a column within <dataframe>
        if column is not None:
            if isinstance(column, int):
                if column < len(self.dataframe.columns):
                    self.column = self.dataframe.columns[column]
                else:
                    raise ValueError(f'<column> with index {column} is not valid for '
                                     f'dataframe with {len(self.dataframe.columns)} columns')
            elif isinstance(column, str):
                if column in self.dataframe.columns:
                    self.column = column
                else:
                    raise ValueError(f'Column {column} is not valid for given dataframe.\n'
                                     f'Valid columns are {self.dataframe.columns}')
            else:
                raise TypeError(f'Column must be {str} or {int}, {type(column)} was passed.')
        else:
            # Default to the first column
            self.column = self.dataframe.columns[0]
        # Ensure no nans are present in the <dataframe> <column>
        nancount = np.sum(np.isnan(self.dataframe[self.column].values))
        if nancount > 0:
            raise ValueError(f'<dataframe> contains {nancount} NaN values in column {self.column}.'
                             f'\nNaN values must be removed or filled before performing analysis.')
        # Ensure values in <dataframe> <column> are real numbers
        if not np.all(np.isreal(self.dataframe[self.column].values)):
            raise ValueError(f'Values in <dataframe> <column> must be real numbers,'
                             f' {self.dataframe[self.column].values.dtype} was passed')
        # Calculate number of blocks of <block_size> in <dataframe>
        self.block_size = block_size
        self.gap_length = gap_length
        self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
        # Separate data into clusters using gap_length and plot each cluster independently
        # This way distant clusters are not connected on the plot
        if self.gap_length is not None:
            # NOTE(review): the first value/index is seeded here AND appended
            # again on the loop's first iteration (the time delta is 0, which
            # is not > gap_length), so the first point appears twice in the
            # first cluster — confirm whether this duplication is intended
            cluster_values = [[self.dataframe[self.column].values.copy()[0]]]
            cluster_indexes = [[self.dataframe.index.values.copy()[0]]]
            for index, value in zip(self.dataframe.index, self.dataframe[self.column].values):
                # New cluster encountered
                if index - cluster_indexes[-1][-1] > np.timedelta64(pd.Timedelta(hours=self.gap_length)):
                    cluster_values.append([value])
                    cluster_indexes.append([index])
                # Continuing within current cluster
                else:
                    cluster_values[-1].append(value)
                    cluster_indexes[-1].append(index)
            # NOTE(review): clusters generally have unequal lengths; np.array
            # on ragged nested lists is deprecated and raises in NumPy >= 1.24
            # unless dtype=object is given — verify against the pinned NumPy
            cluster_indexes = np.array(cluster_indexes)
            cluster_values = np.array(cluster_values)
            self.dataframe_declustered = np.array([cluster_indexes, cluster_values])
        else:
            self.dataframe_declustered = None
        # Initialize internal status
        # Internal status is used to delete calculation results when earlier methods are called
        # e.g. removes fit data and results when extreme events are exctracted. This prevents conflicts and errors
        self.__status = dict(
            extremes=False,
            fit=False,
            results=False
        )
        # Extremes extraction
        self.extremes_method = None
        self.extremes_type = None
        self.threshold = None
        self.block_boundaries = None
        self.extremes = None
        self.extremes_rate = None
        self.plotting_position = None
        # Extremes fit
        self.distribution_name = None
        self.fit_method = None
        self.fit_parameters = None
        self.scipy_fit_options = None
        self.sampler = None
        self.mcmc_chain = None
        self.fixed_parameters = None
        # Results
        self.results = None
def __get_blocks(self, gap_length):
"""
Calculates number of blocks of size <self.block_size> in <self.dataframe> <self.column>.
Parameters
----------
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Returns
-------
n : float
Number of blocks.
"""
# Calculate number of blocks with gaps accounted for
if gap_length is not None:
timedelta = np.timedelta64(pd.Timedelta(hours=gap_length))
# Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
new_index = self.dataframe.index.values.copy()
for i in np.arange(1, len(new_index)):
shift = new_index[i] - new_index[i-1]
if shift > timedelta:
# Add 1/10 of gap_length to avoid duplicate dates
new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=gap_length/10))
series_range = np.float64(new_index[-1] - new_index[0])
# Calculate number of blocks with gaps not accounted for
else:
series_range = np.float64((self.dataframe.index[-1] - self.dataframe.index[0]).value)
return series_range / 1e9 / 60 / 60 / 24 / self.block_size
def __update(self):
"""
Updates internal state of the EVA class instance object.
This method is used to delete calculation results when earlier methods are called.
For example, removes all data related to fit and results when extreme events are extracted.
"""
if not self.__status['extremes']:
self.extremes_method = None
self.extremes_type = None
self.threshold = None
self.block_boundaries = None
self.extremes = None
self.extremes_rate = None
self.plotting_position = None
if not self.__status['fit']:
self.distribution_name = None
self.fit_method = None
self.fit_parameters = None
self.scipy_fit_options = None
self.sampler = None
self.mcmc_chain = None
self.fixed_parameters = None
if not self.__status['results']:
self.results = None
    def __repr__(self):
        """
        Generates a string with a summary of the EVA class instance object state.

        Sections for extremes and fit show 'N/A' placeholders until the
        corresponding pipeline stage has been completed.
        """
        # Total span of the analyzed series in days (index assumed datetime-like)
        series_range = (self.dataframe.index[-1] - self.dataframe.index[0]).value / 1e9 / 60 / 60 / 24
        # Header block - always present
        summary = str(
            f'{" "*35}Extreme Value Analysis Summary\n'
            f'{"="*100}\n'
            f'Analyzed parameter{self.column:>29}{" "*6}Series length{series_range:29.2f} days\n'
            f'Gap length{self.gap_length:31.2f} hours{" "*6}'
            f'Adjusted series length{self.number_of_blocks*self.block_size:20.2f} days\n'
            f'Block size{self.block_size:32.2f} days{" "*6}Number of blocks{self.number_of_blocks:31.2f}\n'
            f'{"="*100}\n'
        )
        # Extremes section - real values once extracted, 'N/A' otherwise
        if self.__status['extremes']:
            summary += str(
                f'Number of extreme events{len(self.extremes):23}{" "*6}Extraction method{self.extremes_method:>30}\n'
                f'Extreme event rate{self.extremes_rate:16.2f} events/block{" "*6}'
                f'Plotting position{self.plotting_position:>30}\n'
                f'Threshold{self.threshold:38.2f}{" "*6}Extreme values type{self.extremes_type:>28}\n'
                f'{"="*100}\n'
            )
        else:
            summary += str(
                f'Number of extreme events{"N/A":>23}{" " * 6}Extraction method{"N/A":>30}\n'
                f'Extreme event rate{"N/A":>16} events/block{" " * 6}'
                f'Plotting position{"N/A":>30}\n'
                f'Threshold{"N/A":>38}{" "*6}Extreme values type{"N/A":>28}\n'
                f'{"=" * 100}\n'
            )
        # Fit section - contents depend on the fit method used (MCMC vs MLE)
        if self.__status['fit']:
            if self.fit_method == 'MCMC':
                # Approximate point estimate from the MCMC chain
                # (kernel of the second half of the chain, first half treated as burn-in)
                fit_parameters = self._kernel_fit_parameters(
                    burn_in=int(self.mcmc_chain.shape[1] / 2),
                    kernel_steps=100
                )
                summary += str(
                    f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Markov chain Monte Carlo":>37}\n'
                    f'MCMC fit parameters (approximate){str(np.round(fit_parameters, 3)):>14}\n'
                    f'{"=" * 100}'
                )
            elif self.fit_method == 'MLE':
                summary += str(
                    f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Maximum Likelihood Estimate":>37}\n'
                    f'MLE fit parameters{str(np.round(self.fit_parameters, 3)):>29}\n'
                    f'{"=" * 100}'
                )
        else:
            summary += str(
                f'Distribution{"N/A":>35}{" " * 6}Fit method{"N/A":>37}\n'
                f'Fit parameters{"N/A":>33}\n'
                f'{"=" * 100}'
            )
        return summary
def to_pickle(self, path):
"""
Exports EVA object to a .pyc file. Preserves all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
Path to pickle file: e.g. <path:\to\pickle.pyc>.
"""
with open(path, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def read_pickle(path):
"""
Reads a .pyc file with EVA object. Loads all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
Path to pickle file: e.g. <path:\to\pickle.pyc>.
Returns
-------
file : EVA class instance object
Saved EVA object with all data and internal state preserved.
"""
with open(path, 'rb') as f:
file = pickle.load(f)
return file
    def get_extremes(self, method='BM', plotting_position='Weibull', extremes_type='high', **kwargs):
        """
        Extracts extreme values from <self.dataframe> <self.column> using the BM (Block Maxima)
        or the POT (Peaks Over Threshold) methods. If method is POT, also declusters extreme values using
        the runs method (aka minimum distance between independent events).

        Parameters
        ----------
        method : str, optional
            Peak extraction method. 'POT' for Peaks Over Threshold and 'BM' for Block Maxima (default='BM').
        plotting_position : str, optional
            Plotting position (default='Weibull'). Has no effect on return value inference,
            affects only some goodness of fit statistics and locations of observed extremes on the
            return values plot.
        extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
            Use 'high' for extreme high values, use 'low' for extreme low values.
        kwargs
            for method='POT'
                threshold : float
                    Threshold for extreme value extraction.
                    Only values above (below, if <extremes_type='low'>) this threshold are extracted.
                r : float, optional
                    Minimum distance in hours between events for them to be considered independent.
                    Used to decluster extreme values using the runs method (default=24).
                adjust_threshold : bool, optional
                    If True, sets threshold equal to smallest/largest exceedance.
                    This way Generalized Pareto Distribution location parameter is strictly 0.
                    Eliminates instabilities associated with estimating location (default=True).

        Returns
        -------
        Creates a <self.extremes> dataframe with extreme values and return periods determined using
        the given plotting position as p=(rank-alpha)/(N+1-alpha-beta) and T=1/(1-p).
        """
        # Update internal status - invalidates any previous extremes/fit/results
        self.__status = dict(
            extremes=False,
            fit=False,
            results=False
        )
        self.__update()
        if extremes_type not in ['high', 'low']:
            raise ValueError(f'<extremes_type> must be high or low, {extremes_type} was passed')
        self.extremes_type = extremes_type
        # Block Maxima method
        if method == 'BM':
            assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
            # Set threshold to 0 for compatibility between BM and POT formulas
            self.extremes_method = 'Block Maxima'
            self.threshold = 0
            # Generate new index with gaps eliminated
            if self.gap_length is not None:
                gap_delta = np.timedelta64(pd.Timedelta(hours=self.gap_length))
                # Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
                new_index = self.dataframe.index.values.copy()
                for i in np.arange(1, len(new_index)):
                    shift = new_index[i] - new_index[i-1]
                    if shift > gap_delta:
                        # Add 1/10 of gap_length to avoid duplicate dates
                        new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=self.gap_length/10))
            else:
                new_index = self.dataframe.index.values.copy()
            # Create local reindexed dataframe with <new_index> and <id> column to get original datetime later
            local_dataframe = pd.DataFrame(
                data=self.dataframe[self.column].values.copy(),
                columns=[self.column], index=new_index
            )
            local_dataframe['id'] = np.arange(len(local_dataframe))
            # Find boundaries of blocks of <self.block_size>
            # <block_boundaries> holds (start, end) pairs in the gap-eliminated index;
            # <self.block_boundaries> holds the corresponding original timestamps
            block_delta = np.timedelta64(pd.Timedelta(days=self.block_size))
            block_boundaries = [(new_index[0], new_index[0] + block_delta)]
            self.block_boundaries = [self.dataframe.index.values.copy()[0]]
            while block_boundaries[-1][-1] < local_dataframe.index.values[-1]:
                block_boundaries.append(
                    (block_boundaries[-1][-1], block_boundaries[-1][-1] + block_delta)
                )
                self.block_boundaries.append(
                    self.dataframe.index.values.copy()[
                        local_dataframe.truncate(before=block_boundaries[-1][0])['id'].values[0]
                    ]
                )
            self.block_boundaries.append(self.block_boundaries[-1] + block_delta)
            self.block_boundaries = np.array(self.block_boundaries)
            block_boundaries = np.array(block_boundaries)
            # Update number_of_blocks (integer count replaces the fractional estimate)
            self.number_of_blocks = len(self.block_boundaries) - 1
            # Find extreme values within each block and associated datetime indexes from original dataframe
            extreme_values, extreme_indexes = [], []
            for i, block_boundary in enumerate(block_boundaries):
                # Last block is open-ended to capture the tail of the series
                if i == len(block_boundaries) - 1:
                    local_data = local_dataframe[local_dataframe.index >= block_boundary[0]]
                else:
                    local_data = local_dataframe[
                        (local_dataframe.index >= block_boundary[0]) & (local_dataframe.index < block_boundary[1])
                    ]
                # Empty blocks (all data fell in a gap) contribute no extreme
                if len(local_data) != 0:
                    if self.extremes_type == 'high':
                        extreme_values.append(local_data[self.column].values.copy().max())
                    else:
                        extreme_values.append(local_data[self.column].values.copy().min())
                    # Map back to the original (non-shifted) timestamp via the <id> column
                    local_index = self.dataframe.index.values.copy()[
                        local_data[local_data[self.column].values == extreme_values[-1]]['id']
                    ]
                    # Duplicate maxima within a block - keep the first occurrence
                    if np.isscalar(local_index):
                        extreme_indexes.append(local_index)
                    else:
                        extreme_indexes.append(local_index[0])
            self.extremes = pd.DataFrame(data=extreme_values, columns=[self.column], index=extreme_indexes)
        # Peaks Over Threshold method
        elif method == 'POT':
            self.threshold = kwargs.pop('threshold')
            r = kwargs.pop('r', 24)
            adjust_threshold = kwargs.pop('adjust_threshold', True)
            assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
            self.extremes_method = 'Peaks Over Threshold'
            # Make sure correct number of blocks is used (overrides previously created BM values)
            # BM stores an int count; POT needs the fractional estimate from __get_blocks
            if isinstance(self.number_of_blocks, int):
                self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
            # Extract raw extremes
            if self.extremes_type == 'high':
                self.extremes = self.dataframe[self.dataframe[self.column] > self.threshold][self.column].to_frame()
            else:
                self.extremes = self.dataframe[self.dataframe[self.column] < self.threshold][self.column].to_frame()
            # Decluster raw extremes using runs method
            if r is not None:
                r = np.timedelta64(pd.Timedelta(hours=r))
                last_cluster_index = self.extremes.index.values.copy()[0]
                peak_cluster_values = [self.extremes[self.column].values.copy()[0]]
                peak_cluster_indexes = [self.extremes.index.values.copy()[0]]
                for index, value in zip(self.extremes.index, self.extremes[self.column].values):
                    # New cluster encountered
                    if index - last_cluster_index > r:
                        peak_cluster_values.append(value)
                        peak_cluster_indexes.append(index)
                    # Continuing within current cluster
                    else:
                        # Update cluster peak
                        if self.extremes_type == 'high':
                            if value > peak_cluster_values[-1]:
                                peak_cluster_values[-1] = value
                                peak_cluster_indexes[-1] = index
                        else:
                            if value < peak_cluster_values[-1]:
                                peak_cluster_values[-1] = value
                                peak_cluster_indexes[-1] = index
                    # Index of previous cluster - lags behind <index> by 1
                    last_cluster_index = index
                self.extremes = pd.DataFrame(
                    data=peak_cluster_values, index=peak_cluster_indexes, columns=[self.column]
                )
            # Update threshold to smallest/largest extreme value in order to fix the GPD location parameter at 0.
            # GPD is very unstable with non-zero location.
            if adjust_threshold:
                if self.extremes_type == 'high':
                    self.threshold = self.extremes[self.column].values.min()
                else:
                    self.threshold = self.extremes[self.column].values.max()
        else:
            raise ValueError(f'Method {method} not recognized')
        self.extremes.index.name = self.dataframe.index.name
        # Calculate rate of extreme events (events/block)
        self.extremes_rate = len(self.extremes) / self.number_of_blocks
        # Assign ranks to data with duplicate values having average of ranks they would have individually
        self.plotting_position = plotting_position
        self.extremes['Return Period'] = self.__get_return_period(plotting_position=self.plotting_position)
        # Update internal status - extremes are now valid, fit/results are not
        self.__status = dict(
            extremes=True,
            fit=False,
            results=False
        )
        self.__update()
def __get_return_period(self, plotting_position, return_cdf=False):
"""
Assigns return periods to extracted extreme events and updates the <self.extremes> index.
Parameters
----------
plotting_position : str
Plotting position. Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
return_cdf : bool, optional
If True, returns cdf of extracted extremes (default=False).
"""
# Assign ranks to data with duplicate values having average of ranks they would have individually
if self.extremes_type == 'high':
ranks = scipy.stats.rankdata(self.extremes[self.column].values, method='average')
else:
ranks = len(self.extremes) + 1 - scipy.stats.rankdata(self.extremes[self.column].values, method='average')
# Calculate return periods using a specified plotting position
# https://matplotlib.org/mpl-probscale/tutorial/closer_look_at_plot_pos.html
plotting_positions = {
'ECDF': (0, 1),
'Hazen': (0.5, 0.5),
'Weibull': (0, 0),
'Laplace': (-1, -1),
'Tukey': (1 / 3, 1 / 3),
'Blom': (3 / 8, 3 / 8),
'Median': (0.3175, 0.3175),
'Cunnane': (0.4, 0.4),
'Gringorten': (0.44, 0.44),
'Gumbel': (1, 1)
}
if plotting_position not in plotting_positions:
raise ValueError(f'Plotting position {plotting_position} not recognized')
alpha, beta = plotting_positions[plotting_position][0], plotting_positions[plotting_position][1]
cdf = (ranks - alpha) / (len(self.extremes) + 1 - alpha - beta)
if return_cdf:
return cdf
# Survival function - aka upper tail probability or probability of exceedance
sf = 1 - cdf
return 1 / sf / self.extremes_rate
def plot_extremes(self):
"""
Plots extracted extreme values on top of <self.dataframe> <self.column> observed time series.
Shows boundaries of blocks for the Block Maxima method and threshold level for the Peaks Over Threshold method.
Returns
-------
tuple(fig, ax)
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Run self.get_extremes() first')
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
self.extremes.index, self.extremes[self.column],
edgecolors='white', marker='s', facecolors='k', s=40, lw=1, zorder=15
)
if self.gap_length is None:
ax.plot(
self.dataframe.index, self.dataframe[self.column],
color='#3182bd', lw=.5, alpha=.8, zorder=5
)
else:
for x, y in zip(self.dataframe_declustered[0], self.dataframe_declustered[1]):
ax.plot(x, y, color='#3182bd', lw=.5, alpha=.8, zorder=5)
if self.extremes_method == 'Block Maxima':
for _block in self.block_boundaries:
ax.axvline(_block, color='k', ls='--', lw=1, zorder=10)
elif self.extremes_method == 'Peaks Over Threshold':
ax.axhline(self.threshold, color='k', ls='--', lw=1, zorder=10)
ax.set_title(f'Extreme Values Time Series, {self.extremes_method}')
if len(self.dataframe.index.name) > 0:
ax.set_xlabel(f'{self.dataframe.index.name}')
else:
ax.set_xlabel('Date')
ax.set_ylabel(f'{self.column}')
annot = ax.annotate(
'', xy=(self.extremes.index[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
self.extremes.index[0], self.extremes[self.column].values[0],
edgecolors='white', marker='s', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return period : {self.extremes["Return Period"].values[n]:.2f}\n'
f'Plotting position : {self.plotting_position}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
def plot_mean_residual_life(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
adjust_threshold=True, limit=10, plot=True):
"""
Plots means of residuals against thresholds.
Threshold should be chosen as the smallest threshold in a region where the mean residuals' plot
is approximately linear. Generalized Pareto Distribution is asymptotically valid in this region.
Parameters
----------
thresholds : array_like, optional
Array with threshold values for which the plot is generated.
Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
r : float, optional
POT method only: minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
extremes_type : str, optional
Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (defaul='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
limit : int, optional
Minimum number of exceedances (peaks) for which calculations are performed (default=10).
plot : bool, optional
Generates plot if True, returns data if False (default=True).
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False : tuple(thresholds, residuals, confidence_low, confidence_top)
"""
if thresholds is None:
if extremes_type == 'high':
thresholds = np.linspace(
np.quantile(self.dataframe[self.column].values, .95),
self.dataframe[self.column].values.max(),
100
)
else:
thresholds = np.linspace(
self.dataframe[self.column].values.min(),
np.quantile(self.dataframe[self.column].values, .05),
100
)
if np.isscalar(thresholds):
raise ValueError('Thresholds must be an array. A scalar was provided')
thresholds = np.sort(thresholds)
if extremes_type == 'high':
thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
else:
thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
# Find mean residuals and 95% confidence interval for each threshold
residuals, confidence = [], []
true_thresholds = []
for u in thresholds:
self.get_extremes(
method='POT', threshold=u, r=r,
adjust_threshold=adjust_threshold, extremes_type=extremes_type
)
true_thresholds.append(self.threshold)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if extremes_type == 'low':
exceedances *= -1
if len(exceedances) > limit:
residuals.append(exceedances.mean())
# Ubiased estimator of sample variance of mean s^2/n
confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=exceedances.mean(),
scale=exceedances.std(ddof=1)/np.sqrt(len(exceedances))
)
)
else:
residuals.append(np.nan)
confidence.append((np.nan, np.nan))
residuals = np.array(residuals)
confidence = np.array(confidence)
# Remove non-unique values
if adjust_threshold:
thresholds, mask = np.unique(true_thresholds, return_index=True)
residuals = residuals[mask]
confidence = confidence[mask]
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
# Generate mean residual life plot
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.set_title('Mean Residual Life Plot')
ax.plot(thresholds, residuals, color='k', zorder=10, label='Mean residual life', lw=2)
ax.plot(thresholds, confidence.T[0], ls='--', color='k', lw=0.5, zorder=10)
ax.plot(thresholds, confidence.T[1], ls='--', color='k', lw=0.5, zorder=10)
ax.fill_between(
thresholds, confidence.T[0], confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval', zorder=5
)
ax.legend()
ax.set_xlabel('Threshold')
ax.set_ylabel('Mean Residual')
fig.tight_layout()
return fig, ax
else:
return thresholds, residuals, confidence.T[0], confidence.T[1]
    def plot_parameter_stability(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
                                 adjust_threshold=True, limit=10, plot=True, dx='1e-10', precision=100):
        """
        Plots shape and modified scale parameters of the Generalized Pareto Distribution (GPD) against thresholds.
        GPD is asymptotically valid in a region where these parameters are approximately linear.

        Parameters
        ----------
        thresholds : array_like, optional
            Array with threshold values for which the plot is generated.
            Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
        r : float, optional
            Minimum distance in hours between events for them to be considered independent.
            Used to decluster extreme values using the runs method (default=24).
        alpha : float, optional
            Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
        extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
            Use 'high' for extreme high values, use 'low' for extreme low values.
        adjust_threshold : bool, optional
            If True, sets threshold equal to smallest/largest exceedance.
            This way Generalized Pareto Distribution location parameter is strictly 0.
            Eliminates instabilities associated with estimating location (default=True).
        limit : int, optional
            Minimum number of exceedances (peaks) for which calculations are performed (default=10).
        plot : bool, optional
            Generates plot if True, returns data if False (default=True).
        dx : str, optional
            String representing a float, which represents spacing at which partial derivatives
            are estimated (default='1e-10').
        precision : int, optional
            Precision of floating point calculations (see mpmath library documentation) (default=100).
            Derivative estimated with low <precision> value may have
            a significant error due to rounding and under-/overflow.

        Returns
        -------
        if plot=True (default) : tuple(fig, ax)
        if plot=False :
            if alpha is None : tuple(thresholds, shapes, modified_scales)
            if alpha is passed : tuple(thresholds, shapes, modified_scales, shapes_confidence, scales_confidence)
        """
        # Default threshold grid: tail 5% of the data on the relevant side
        if thresholds is None:
            if extremes_type == 'high':
                thresholds = np.linspace(
                    np.quantile(self.dataframe[self.column].values, .95),
                    self.dataframe[self.column].values.max(),
                    100
                )
            else:
                thresholds = np.linspace(
                    self.dataframe[self.column].values.min(),
                    np.quantile(self.dataframe[self.column].values, .05),
                    100
                )
        if np.isscalar(thresholds):
            raise ValueError('Thresholds must be an array. A scalar was provided')
        thresholds = np.sort(thresholds)
        # Thresholds at or beyond the series extremum would yield no exceedances
        if extremes_type == 'high':
            thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
        else:
            thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
        shapes, modified_scales = [], []
        shapes_confidence, scales_confidence = [], []
        true_thresholds = []
        for u in thresholds:
            self.get_extremes(
                method='POT', threshold=u, r=r,
                adjust_threshold=adjust_threshold, extremes_type=extremes_type
            )
            true_thresholds.append(self.threshold)
            exceedances = self.extremes[self.column].values - self.threshold
            # Flip exceedances around 0
            if extremes_type == 'low':
                exceedances *= -1
            if len(exceedances) > limit:
                # GPD fit with location fixed at 0 (see adjust_threshold)
                shape, loc, scale = scipy.stats.genpareto.fit(exceedances, floc=0)
                shapes.append(shape)
                # Define modified scale function (used as scalar function for delta method)
                if extremes_type == 'high':
                    def mod_scale_function(*theta):
                        return theta[1] - theta[0] * true_thresholds[-1]
                else:
                    def mod_scale_function(*theta):
                        return theta[1] + theta[0] * true_thresholds[-1]
                modified_scales.append(mod_scale_function(shape, scale))
                if alpha is not None:
                    # High-precision arithmetic - the hessian of the log-likelihood is
                    # numerically delicate at small <dx>
                    with mpmath.workdps(precision):
                        # Define modified log_likelihood function
                        def log_likelihood(*theta):
                            return mpmath.fsum(
                                [
                                    mpmath.log(
                                        coastlib.stats.distributions.genpareto.pdf(
                                            x=_x, shape=theta[0], loc=0, scale=theta[1]
                                        )
                                    ) for _x in exceedances
                                ]
                            )
                        # Calculate delta (gradient) of scalar_function
                        if extremes_type == 'high':
                            delta_scalar = np.array(
                                [
                                    [-true_thresholds[-1]],
                                    [1]
                                ]
                            )
                        else:
                            delta_scalar = np.array(
                                [
                                    [true_thresholds[-1]],
                                    [1]
                                ]
                            )
                        # Calculate observed information matrix (negative hessian of log_likelihood)
                        observed_information = -coastlib.math.derivatives.hessian(
                            func=log_likelihood, n=2, coordinates=[shape, scale], dx=dx, precision=precision
                        ).astype(np.float64)
                        covariance = np.linalg.inv(observed_information)
                        # Estimate modified scale parameter confidence interval using delta method
                        variance = np.dot(
                            np.dot(delta_scalar.T, covariance), delta_scalar
                        ).flatten()[0]
                        scales_confidence.append(
                            scipy.stats.norm.interval(
                                alpha=alpha, loc=modified_scales[-1], scale=np.sqrt(variance)
                            )
                        )
                        # Estimate shape parameter confidence interval directly from covariance matrix
                        shapes_confidence.append(
                            scipy.stats.norm.interval(
                                alpha=alpha, loc=shape, scale=np.sqrt(covariance[0][0])
                            )
                        )
            # Number of exceedances below the limit
            else:
                shapes.append(np.nan)
                modified_scales.append(np.nan)
                if alpha is not None:
                    shapes_confidence.append((np.nan, np.nan))
                    scales_confidence.append((np.nan, np.nan))
        # Convert results to np.ndarray objects
        shapes = np.array(shapes)
        modified_scales = np.array(modified_scales)
        if alpha is not None:
            shapes_confidence = np.array(shapes_confidence)
            scales_confidence = np.array(scales_confidence)
        # Remove non-unique values (adjusted thresholds can collide)
        if adjust_threshold:
            thresholds, mask = np.unique(true_thresholds, return_index=True)
            shapes = shapes[mask]
            modified_scales = modified_scales[mask]
            if alpha is not None:
                shapes_confidence = shapes_confidence[mask]
                scales_confidence = scales_confidence[mask]
        # Update internal status - the threshold sweep invalidated previous extremes
        self.__status = dict(
            extremes=False,
            fit=False,
            results=False
        )
        self.__update()
        if plot:
            with plt.style.context('bmh'):
                fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8), sharex='all')
                ax1.set_title('Parameter Stability Plot')
                ax1.plot(thresholds, shapes, color='k', zorder=10, label='Shape parameter')
                ax2.plot(thresholds, modified_scales, color='k', zorder=10, label='Modified scale parameter', lw=2)
                if alpha is not None:
                    ax1.plot(thresholds, shapes_confidence.T[0], ls='--', color='k', lw=0.5)
                    ax1.plot(thresholds, shapes_confidence.T[1], ls='--', color='k', lw=0.5)
                    ax2.plot(thresholds, scales_confidence.T[0], ls='--', color='k', lw=0.5)
                    ax2.plot(thresholds, scales_confidence.T[1], ls='--', color='k', lw=0.5)
                    ax1.fill_between(
                        thresholds, shapes_confidence.T[0], shapes_confidence.T[1],
                        alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
                    )
                    ax2.fill_between(
                        thresholds, scales_confidence.T[0], scales_confidence.T[1],
                        alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
                    )
                ax2.set_xlabel('Threshold')
                ax1.set_ylabel('Shape parameter')
                ax2.set_ylabel('Modified scale parameter')
                ax1.legend()
                ax2.legend()
                fig.tight_layout()
                return fig, (ax1, ax2)
        else:
            if alpha is None:
                return thresholds, shapes, modified_scales
            else:
                return thresholds, shapes, modified_scales, shapes_confidence, scales_confidence
def test_extremes(self, method, **kwargs):
"""
Provides multiple methods to test independece of extracted extreme values.
Parameters
----------
method : str
Method for testing extreme values' independence.
Accepted methods:
'autocorrelation' - generates an autocorrelation plot
http://www.statsmodels.org/stable/generated/
statsmodels.tsa.stattools.acf.html#statsmodels.tsa.stattools.acf
'lag plot' - generates a lag plot for a given lag
'runs test' - return runs test statistic
https://en.wikipedia.org/wiki/Wald%E2%80%93Wolfowitz_runs_test
kwargs
for autocorrelation:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
nlags : int, optional
Number of lags to return autocorrelation for (default for all possible lags).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
unbiased : bool, optional
If True, then denominators for autocovariance are n-k, otherwise n (default=False)
for lag plot:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
lag : int, optional
Lag value (default=1).
for runs test:
alpha : float, optional
Significance level (default=0.05).
Returns
-------
for autocorrelation:
if plot=True : tuple(fig, ax)
if plot=False : tuple(lags, acorr, ci_low, ci_top)
for lag plot:
if plot=True : tuple(fig, ax)
if plot=False : tuple(x, y)
for runs test:
str(test summary)
"""
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to test')
if method == 'autocorrelation':
plot = kwargs.pop('plot', True)
nlags = kwargs.pop('nlags', len(self.extremes) - 1)
alpha = kwargs.pop('alpha', .95)
unbiased = kwargs.pop('unbiased', False)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
acorr, ci = sm.tsa.stattools.acf(
x=self.extremes[self.column].values, alpha=1-alpha, nlags=nlags, unbiased=unbiased
)
ci_low, ci_top = ci.T[0] - acorr, ci.T[1] - acorr
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.vlines(np.arange(nlags+1), [0], acorr, lw=1, color='k', zorder=15)
points = ax.scatter(
np.arange(nlags+1), acorr, marker='o', s=40, lw=1,
facecolor='k', edgecolors='white', zorder=20, label='Autocorrelation value'
)
ax.plot(np.arange(nlags+1)[1:], ci_low[1:], color='k', lw=.5, ls='--', zorder=15)
ax.plot(np.arange(nlags+1)[1:], ci_top[1:], color='k', lw=.5, ls='--', zorder=15)
ax.fill_between(
np.arange(nlags+1)[1:], ci_low[1:], ci_top[1:],
color='k', alpha=.1, zorder=5, label=f'{alpha*100:.0f}% confidence interval'
)
ax.axhline(0, color='k', lw=1, ls='--', zorder=10)
ax.legend()
ax.set_title('Autocorrelation plot')
ax.set_xlabel('Lag')
ax.set_ylabel('Correlation coefficient')
annot = ax.annotate(
'', xy=(0, 0),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
0, 0,
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=25
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Lag : {np.arange(nlags+1)[n]:d}\n'
f'Correlation : {acorr[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return np.arange(nlags+1), acorr, ci_low, ci_top
elif method == 'lag plot':
plot = kwargs.pop('plot', True)
lag = kwargs.pop('lag', 1)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if lag == 0:
x = self.extremes[self.column].values
else:
x = self.extremes[self.column].values[:-lag]
y = self.extremes[self.column].values[lag:]
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
x, y, marker='o', facecolor='k', s=40, edgecolors='white', lw=1, zorder=5
)
ax.set_xlabel(f'{self.column} i')
ax.set_ylabel(f'{self.column} i+{lag}')
ax.set_title('Extreme Values Lag Plot')
annotation = ax.annotate(
"", xy=(0, 0), xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
np.nanmean(x), np.nanmean(y),
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annotation.set_visible(False)
def update_annotation(ind):
pos = points.get_offsets()[ind['ind'][0]]
annotation.xy = pos
point.set_offsets(pos)
text = "{}".format(" ".join(
[
f'{self.extremes.index[n]} : {ind["ind"][0]}\n'
f'{self.extremes.index[n+lag]} : {ind["ind"][0]+lag}'
for n in ind['ind']
]))
annotation.set_text(text)
def hover(event):
vis = annotation.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annotation(ind)
annotation.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annotation.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return x, y
elif method == 'runs test':
alpha = kwargs.pop('alpha', .05)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Calculate number of runs of shifted series
s = self.extremes[self.column].values - np.quantile(self.extremes[self.column].values, .5)
n_plus = np.sum(s > 0)
n_minus = np.sum(s < 0)
n_runs = 1
for i in range(1, len(s)):
# Change of sign
if s[i] * s[i-1] < 0:
n_runs += 1
mean = 2 * n_plus * n_minus / len(s) + 1
variance = (mean - 1) * (mean - 2) / (len(s) - 1)
test_statistic = (n_runs-mean)/np.sqrt(variance)
return str(
f'Ho : data is random\n'
f'Ha : data is not random\n\n'
f'Test statistic : N = {test_statistic:.2f}\n'
f'Significanse level : alpha = {alpha}\n'
f'Critical value : Nalpha = {scipy.stats.norm.ppf(1 - alpha / 2):.2f}\n'
f'Reject Ho if |N| > Nalpha'
)
else:
raise ValueError(f'Method {method} not recognized. Try: autocorrelation')
def fit(self, distribution_name, fit_method='MLE', **kwargs):
"""
Depending on fit method, either creates a tuple with maximum likelihood estimate (MLE)
or an array with samples drawn from posterior distribution of parameters (MCMC).
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
fit_method : str, optional
Fit method - MLE (Maximum Likelihood Estimate, scipy)
or Markov chain Monte Carlo (MCMC, emcee) (default='MLE').
kwargs:
for MLE:
scipy_fit_options : dict, optional
Special scipy fit options like <fc>, <loc>, or <floc>.
For GPD scipy_fit_options=dict(floc=0) by default (fixed location parameter at 0).
This parameter is carried over to further calculations, such as confidence interval.
for MCMC:
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to fit')
# Update internal status
self.__status = dict(
extremes=True,
fit=False,
results=False
)
self.__update()
if fit_method == 'MLE':
if distribution_name == 'genpareto':
self.scipy_fit_options = kwargs.pop('scipy_fit_options', dict(floc=0))
else:
self.scipy_fit_options = kwargs.pop('scipy_fit_options', {})
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Create local distribution object
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
self.fit_parameters = distribution_object.fit(exceedances, **self.scipy_fit_options)
elif fit_method == 'MCMC':
self.mcmc_chain = self.__run_mcmc(distribution_name, **kwargs)
else:
raise ValueError(f'Fit method {fit_method} not recognized')
# On successful fit assign the fit_ variables
self.fit_method = fit_method
self.distribution_name = distribution_name
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
def __run_mcmc(self, distribution_name, nsamples=1000, nwalkers=200, **kwargs):
"""
Runs emcee Ensemble Sampler to sample posteriot probability of fit parameters given observed data.
Returns sampler chain with <nsamples> for each parameter for each <nwalkers>.
See http://dfm.io/emcee/current/
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
kwargs
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
Returns
-------
Generates an np.ndarray in self.mcmc_chain
Ensemble Sampler chain with <nsamples> for each parameter for each <nwalkers>.
"""
log_prior = kwargs.pop('log_prior', None)
log_likelihood = kwargs.pop('log_likelihood', None)
starting_bubble = kwargs.pop('starting_bubble', 1e-2)
starting_position = kwargs.pop('starting_position', None)
if distribution_name == 'genpareto':
self.fixed_parameters = kwargs.pop('fixed_parameters', [(1, 0)])
else:
self.fixed_parameters = kwargs.pop('fixed_parameters', None)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if self.fixed_parameters == [(1, 0)] and distribution_name == 'genpareto':
pass
else:
if self.fixed_parameters is not None:
if (log_prior is None) or (log_likelihood is None) or (starting_position is None):
raise ValueError(
'<fixed_parameter> only works with custom prior and likelihood functions.\n'
'Starting position should be provided for the fixed_parameters case'
)
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Define log_prior probability function (uniform by default)
if log_prior is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_prior(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return 0
else:
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-prior function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_prior=>.'
)
# Define log_likelihood function
if log_likelihood is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_likelihood(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return np.sum(distribution_object.logpdf(exceedances, shape, 0, scale))
else:
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-likelihood function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_likelihood=>.'
)
# Define log_posterior probability function (not exact - excludes marginal evidence probability)
def log_posterior(theta):
return log_likelihood(theta) + log_prior(theta)
# Set MCMC walkers' starting positions to 0
# (setting to MLE makes algorithm unstable due to being stuck in local maxima)
if starting_position is None:
if distribution_name == 'genpareto' and self.fixed_parameters == [(1, 0)]:
theta_0 = np.array([0, 0])
elif distribution_name in ['genextreme', 'genpareto']:
theta_0 = np.array([0, 0, 0])
else:
theta_0 = distribution_object.fit(exceedances)
starting_position = [[0] * len(theta_0) for _ in range(nwalkers)]
# Randomize starting positions to force walkers explore the parameter space
starting_position = [
np.array(sp) + starting_bubble * np.random.randn(len(starting_position[0]))
for sp in starting_position
]
if len(starting_position) != nwalkers:
raise ValueError(f'Number of starting positions {len(starting_position)} '
f'must be equal to number of walkers {nwalkers}')
ndim = len(starting_position[0])
# Setup the Ensemble Sampler and draw samples from posterior distribution for specified number of walkers
self.__sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)
self.__sampler.run_mcmc(starting_position, nsamples)
# Fill in fixed parameter values
sampler_chain = self._EVA__sampler.chain.copy()
if self.fixed_parameters is not None:
fp = np.transpose(self.fixed_parameters)
ndim = sampler_chain.shape[-1] + len(self.fixed_parameters)
mcmc_chain = np.array(
[
[
[np.nan] * ndim for _ in range(sampler_chain.shape[1])
] for _ in range(sampler_chain.shape[0])
]
)
for i in range(mcmc_chain.shape[0]):
for j in range(mcmc_chain.shape[1]):
counter = 0
for k in range(mcmc_chain.shape[2]):
if k in fp[0]:
mcmc_chain[i][j][k] = fp[1][fp[0] == k][0]
else:
mcmc_chain[i][j][k] = sampler_chain[i][j][counter]
counter += 1
sampler_chain = np.array(mcmc_chain)
return sampler_chain
def _kernel_fit_parameters(self, burn_in, kernel_steps=1000):
"""
Estimate mode of each parameter as peaks of gaussian kernel.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Returns
-------
np.ndarray
Modes of parameters.
"""
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method != 'MCMC':
raise ValueError('Fit method must be MCMC')
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
# Estimate mode of each parameter as peaks of gaussian kernel.
parameters = []
for i, p in enumerate(samples.T):
if self.fixed_parameters is None or (i not in np.transpose(self.fixed_parameters)[0]):
p_filtered = p[~np.isnan(p)]
kernel = scipy.stats.gaussian_kde(p_filtered)
support = np.linspace(
np.quantile(p_filtered, .1), np.quantile(p_filtered, .9),
kernel_steps
)
density = kernel.evaluate(support)
parameters.append(support[density.argmax()])
else:
parameters.append(p[0])
return np.array(parameters)
def plot_trace(self, burn_in, true_theta=None, labels=None):
"""
Plots traces for each parameter. Each trace plot shows all samples for each walker
after first <burn_in> samples are discarded. This method is used to verify fit stability
and to determine the optimal <burn_in> value.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
true_theta : array_like, optional
Array with true (known) values of parameters (default=None). If given, are shown on trace plots.
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
Returns
-------
tuple(fig, axes)
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if labels is None:
labels = [f'Parameter {i+1}' for i in range(self.__sampler.chain.shape[-1])]
# Generate trace plot
ndim = self.__sampler.chain.shape[-1]
with plt.style.context('bmh'):
fig, axes = plt.subplots(ndim, 1, figsize=(12, 8), sharex='all')
if ndim == 1:
axes.set_title('MCMC Trace Plot')
axes.set_xlabel('Sample number')
else:
axes[0].set_title('MCMC Trace Plot')
axes[-1].set_xlabel('Sample number')
for i in range(ndim):
for swalker in self.__sampler.chain:
if ndim == 1:
axes.plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes.set_ylabel(labels[i])
else:
axes[i].plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes[i].set_ylabel(labels[i])
if true_theta is not None:
if ndim == 1:
axes.axhline(true_theta[i], color='orangered', lw=2, zorder=10)
else:
axes[i].axhline(true_theta[i], color='orangered', lw=2, zorder=10)
fig.tight_layout()
return fig, axes
def plot_corner(self, burn_in, bins=100, labels=None, figsize=(12, 12), **kwargs):
"""
Generate corner plot showing the projections of a data set in a multi-dimensional space.
See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
bins : int, optional
See https://corner.readthedocs.io/en/latest/api.html#corner.corner (default=50).
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
figsize : tuple, optional
Figure size (default=(12, 12)).
kwargs
Corner plot keywords. See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Returns
-------
tuple(fig, ax)
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('mcmc_chain attribute not found')
# Generate labels
ndim = self.__sampler.chain.shape[-1]
if labels is None:
labels = np.array([f'Parameter {i + 1}' for i in range(ndim)])
samples = self.__sampler.chain[:, burn_in:, :].reshape((-1, ndim)).copy()
# Generate corner plot
fig, ax = plt.subplots(ndim, ndim, figsize=figsize)
fig = corner.corner(samples, bins=bins, labels=labels, fig=fig, **kwargs)
return fig, ax
def plot_posterior(self, rp, burn_in, alpha=.95, plot=True, kernel_steps=1000, bins=100):
"""
Returns posterior distribution of return value for a specific return period.
Can be used to explore the posterior distribution p(rv|self.extremes).
Parameters
----------
rp : float
Return period (1/rp represents probability of exceedance over self.block_size).
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
alpha : float, optional
Shows confidence bounds for given interval alpha (default=.95). Doesn't show if None.
plot : bool, optional
If True, plots histogram of return value (default=True). If False, return data
kernel_steps : int, optional
Number of bins (kernel support points) used to plot kernel density (default=1000).
bins : int, optional
Number of bins in historgram (default=100). Only when plot=True.
Returns
-------
Distribution of return value for a given return period
if plot = True : tuple(fig, ax)
if plot = Fale : np.ndarray
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if not np.isscalar(rp):
raise ValueError('rp must be scalar')
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return value for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Set up gaussian kernel
support = np.linspace(return_values.min(), return_values.max(), kernel_steps)
kernel = scipy.stats.gaussian_kde(return_values)
density = kernel.evaluate(support)
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=.5, ls='--', zorder=10
)
ax.plot(
support, density,
color='k', lw=2, zorder=15
)
if alpha is not None:
ax.axvline(np.nanquantile(return_values, (1 - alpha) / 2), lw=1, color='k', ls='--')
ax.axvline(np.nanquantile(return_values, (1 + alpha) / 2), lw=1, color='k', ls='--')
if self.extremes_type == 'high':
ax.set_xlim(right=np.nanquantile(return_values, .999))
else:
ax.set_xlim(left=np.nanquantile(return_values, .001))
ax.set_title(f'{rp}-year Return Period Posterior Distribution')
ax.set_xlabel('Return value')
ax.set_ylabel('Probability density')
fig.tight_layout()
return fig, ax
else:
return return_values
def return_value(self, rp, **kwargs):
"""
Calculates return values for given return periods.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
float or array of floats
Return values for given return periods.
"""
return self.isf(1 / rp / self.extremes_rate, **kwargs)
def confidence_interval(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals for given return periods.
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
Numeber of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
if self.fit_method == 'MLE':
method = kwargs.pop('method', 'Monte Carlo')
if method == 'Monte Carlo':
return self.__monte_carlo(rp=rp, alpha=alpha, **kwargs)
elif method == 'Delta':
return self.__delta(rp=rp, alpha=alpha, **kwargs)
elif method in ['Profile Likelihood']:
# TODO - implement Profile Likelihood mehtod
raise NotImplementedError(f'Method {method} not implemented')
else:
raise ValueError(f'Method {method} not recognized')
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
alpha = kwargs.pop('alpha', .95)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return values for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Calculate quantiles for lower and upper confidence bounds for each return period
if np.isscalar(rp):
return (
np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
)
else:
return np.array(
[
[np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
[np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
    def __monte_carlo(self, rp, alpha=.95, **kwargs):
        """
        Runs the Monte Carlo confidence interval estimation method.
        Parameters
        ----------
        rp : float or array_like
            Return periods (1/rp represents probability of exceedance over self.block_size).
        alpha : float, optional
            Confidence interval bounds (default=.95).
        kwargs
            k : int, optional
                Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
            sampling_method : str, optional
                Sampling method (default='constant'):
                    'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
                    'poisson' - number of extremes is Poisson-distributed
                    'jacknife' - aka drop-one-out, works only when <source=data>
            source : str, optional
                Specifies where new data is sampled from (default='data'):
                    'data' - samples with replacement directly from extracted extreme values
                    'parametric' - samples from distribution with previously estimated (MLE) parameters
            assume_normality : bool, optional
                If True, assumes return values are normally distributed.
                If False, estimates quantiles directly (default=False).
        Returns
        -------
        tuple of np.ndarray objects
            Tuple with arrays with confidence intervals (lower, upper).
        """
        k = kwargs.pop('k', 1e4)
        sampling_method = kwargs.pop('sampling_method', 'constant')
        source = kwargs.pop('source', 'data')
        assume_normality = kwargs.pop('assume_normality', False)
        # TODO - implement a discard rule (discard bad samples)
        # discard_rule = kwargs.pop('discard_rule', None)
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        distribution_object = getattr(scipy.stats, self.distribution_name)
        # Work on exceedances over the threshold; low extremes are mirrored around 0
        # so the same right-tail machinery applies to both extreme types
        exceedances = self.extremes[self.column].values - self.threshold
        if self.extremes_type == 'low':
            exceedances *= -1
        # Sample from data case
        if source == 'data':
            if sampling_method == 'constant':
                # Bootstrap: resample observed exceedances with replacement,
                # keeping the sample size equal to the observed number of extremes
                sample_size = len(self.extremes)
                return_values = []
                while len(return_values) < k:
                    sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
                    sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
                    # Return value sign convention: add the fitted quantile above
                    # the threshold for high extremes, subtract for low extremes
                    if self.extremes_type == 'high':
                        return_values.append(
                            self.threshold + distribution_object.isf(
                                1 / rp / self.extremes_rate, *sample_fit_parameters
                            )
                        )
                    else:
                        return_values.append(
                            self.threshold - distribution_object.isf(
                                1 / rp / self.extremes_rate, *sample_fit_parameters
                            )
                        )
            elif sampling_method == 'poisson':
                # Sample size (and therefore extreme rate) varies between
                # simulations following a Poisson distribution
                return_values = []
                while len(return_values) < k:
                    sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
                    sample_rate = sample_size / self.number_of_blocks
                    sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
                    sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
                    if self.extremes_type == 'high':
                        return_values.append(
                            self.threshold + distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
                    else:
                        return_values.append(
                            self.threshold - distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
            elif sampling_method == 'jacknife':
                # Drop-one-out: one deterministic simulation per observed extreme
                sample_rate = (len(self.extremes) - 1) / self.number_of_blocks
                return_values = []
                for i in range(len(self.extremes)):
                    sample = np.delete(arr=exceedances, obj=i)
                    sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
                    if self.extremes_type == 'high':
                        return_values.append(
                            self.threshold + distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
                    else:
                        return_values.append(
                            self.threshold - distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
            else:
                raise ValueError(f'for <source=data> the sampling method must be <constant>, <poisson>, or <jacknife>,'
                                 f' <{sampling_method}> was passed')
        # Sample from distribution (parametric) case
        elif source == 'parametric':
            if sampling_method == 'constant':
                # Parametric bootstrap: draw synthetic samples from the fitted
                # distribution instead of resampling the observed data
                sample_size = len(self.extremes)
                return_values = []
                while len(return_values) < k:
                    sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
                    sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
                    if self.extremes_type == 'high':
                        return_values.append(
                            self.threshold + distribution_object.isf(
                                1 / rp / self.extremes_rate, *sample_fit_parameters
                            )
                        )
                    else:
                        return_values.append(
                            self.threshold - distribution_object.isf(
                                1 / rp / self.extremes_rate, *sample_fit_parameters
                            )
                        )
            elif sampling_method == 'poisson':
                return_values = []
                while len(return_values) < k:
                    sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
                    sample_rate = sample_size / self.number_of_blocks
                    sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
                    sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
                    if self.extremes_type == 'high':
                        return_values.append(
                            self.threshold + distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
                    else:
                        return_values.append(
                            self.threshold - distribution_object.isf(
                                1 / rp / sample_rate, *sample_fit_parameters
                            )
                        )
            else:
                raise ValueError(f'for <source=parametric> the sampling method must be <constant> or <poisson>,'
                                 f' <{sampling_method}> was passed')
        else:
            raise ValueError(f'source must be either <data> or <parametric>, <{source}> was passed')
        # Estimate confidence bounds for sampled return values
        # (rows of return_values are simulations; columns correspond to return periods when rp is an array)
        return_values = np.array(return_values)
        if np.isscalar(rp):
            if assume_normality:
                # Normal approximation: interval from the sample mean and standard deviation
                return scipy.stats.norm.interval(
                    alpha=alpha, loc=np.nanmean(return_values), scale=np.nanstd(return_values, ddof=1)
                )
            else:
                # Distribution-free: take empirical quantiles of the simulated values
                return (
                    np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
                    np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
                )
        else:
            if assume_normality:
                locations = np.array([np.nanmean(row) for row in return_values.T])
                scales = np.array([np.nanstd(row, ddof=1) for row in return_values.T])
                return np.transpose(
                    [
                        scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=scale)
                        for loc, scale in zip(locations, scales)
                    ]
                )
            else:
                return np.array(
                    [
                        [np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
                        [np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
                    ]
                )
def __delta(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals using the delta method. Assumes asymptotic normality.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10').
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
dx = kwargs.pop('dx', '1e-10')
precision = kwargs.pop('precision', 100)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
# Check if a custom distribution with mpmath backend is defined
if self.distribution_name in coastlib.stats.distributions.distributions:
distribution_object = getattr(coastlib.stats.distributions, self.distribution_name)
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
# Account for custom fit parameters (custom genextreme has negative shape in scipy)
if self.distribution_name == 'genextreme':
fit_parameters = self.fit_parameters * np.array([-1, 1, 1])
elif self.distribution_name in ['genpareto']:
fit_parameters = self.fit_parameters
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Generalized Pareto Distribution
if self.distribution_name == 'genpareto':
if self.scipy_fit_options != dict(floc=0):
raise ValueError(
f'Delta method for genpareto is implemented only for the case of '
f'fixed location parameter {dict(floc=0)}, '
f'{self.scipy_fit_options} does not satisfy this criteria'
)
with mpmath.workdps(precision):
# Define modified log_likehood function (only shape and scale, location is fixed)
def log_likelihood(*theta):
return mpmath.fsum(
[
mpmath.log(
coastlib.stats.distributions.genpareto.pdf(
x=x, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
) for x in exceedances
]
)
# Calculate covariance matrix of shape and scale
observed_information = -coastlib.math.derivatives.hessian(
func=log_likelihood, n=2, dx=dx, precision=precision,
coordinates=(fit_parameters[0], fit_parameters[2])
).astype(np.float64)
covariance = np.linalg.inv(observed_information)
# Modify covariance matrix to include uncertainty in threshold exceedance probability
modified_covariance = np.zeros((3, 3))
modified_covariance[1:, 1:] = covariance
# Probability of exceeding threshold for all observations
eta_0 = len(self.extremes) / len(self.dataframe)
# Number of observations per year
ny = len(self.dataframe) / self.number_of_blocks
modified_covariance[0][0] = eta_0 * (1 - eta_0) / len(self.dataframe)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(eta, *theta):
q = 1 / (rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2])
)
loc = np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
variance = np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(eta, *theta):
q = 1 / (_rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2]),
)
locs.append(
np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
)
variances.append(
np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
# Generalized Extreme Distribtuion
elif self.distribution_name == 'genextreme':
if self.scipy_fit_options != {}:
raise ValueError(
f'Delta method for genextreme is implemented only for the case of '
f'unbound parameters {dict()}, '
f'{self.scipy_fit_options} does not satisfy this criteria'
)
# Calculate observed information matrix (negative hessian of log_likelihood)
observed_information = distribution_object.observed_information(
exceedances, *fit_parameters, dx=dx, precision=precision
).astype(np.float64)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(*theta):
q = 1 / rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
loc = np.float64(scalar_function(*fit_parameters))
variance = np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(*theta):
q = 1 / _rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
locs.append(np.float64(scalar_function(*fit_parameters)))
variances.append(
np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
def generate_results(self, rp=None, alpha=.95, **kwargs):
"""
Generates a self.results dataframe with return values and, optionally, confidence intervals.
Used to generate data for output and reporting purpose (run the self.restuls.to_excel()) and to
produce a probability plot (summary).
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
By default is an array of return periods equally spaced on a log-scale from 0.001 to 1000.
alpha : float, optional
Confidence interval bounds (default=.95). Doesn't estimate confidence intervals if None.
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
ci_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
ci_kwargs
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
Numeber of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to
number of extracted extreme values
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
Creates a <self.results> dataframe with return values and, optionally, confidence intervals
for each given return period.
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if rp is None:
rp = np.unique(
np.append(
np.logspace(-3, 3, 200),
[1/12, 7/365.2425, 1, 2, 5, 10, 25, 50, 100, 200, 250, 500, 1000]
)
)
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
if np.isscalar(rp):
rp = np.array([rp])
else:
rp = np.array(rp)
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
ci_kwargs = kwargs.pop('ci_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
ci_kwargs = kwargs.pop('ci_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return_values = self.return_value(rp, **rv_kwargs)
self.results = pd.DataFrame(
data=return_values, index=rp, columns=['Return Value']
)
self.results.index.name = 'Return Period'
if alpha is not None:
ci_lower, ci_upper = self.confidence_interval(rp=rp, alpha=alpha, **ci_kwargs)
if np.isscalar(ci_lower):
ci_lower, ci_upper = np.array([ci_lower]), np.array([ci_upper])
else:
ci_lower, ci_upper = np.array(ci_lower), np.array(ci_upper)
self.results[f'{alpha*100:.0f}% CI Lower'] = ci_lower
self.results[f'{alpha*100:.0f}% CI Upper'] = ci_upper
# Remove bad values from the results
if self.extremes_type == 'high':
mask = self.results['Return Value'].values >= self.extremes[self.column].values.min()
else:
mask = self.results['Return Value'].values <= self.extremes[self.column].values.max()
self.results = self.results[mask]
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=True
)
self.__update()
def pdf(self, x, **kwargs):
"""
Estimates probability density at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of probability densities at <x>.
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='pdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='pdf', **kwargs)
def cdf(self, x, **kwargs):
"""
Estimates cumulative probability at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the cumulative probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of cumulative probability at <x>.
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='cdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='cdf', **kwargs)
def ppf(self, q, **kwargs):
"""
Estimates ppf (inverse cdf or quantile function) at value <x> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the ppf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of ppf at <x>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='ppf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='ppf', **kwargs)
def isf(self, q, **kwargs):
"""
Estimates isf (inverse survival or upper quantile function) at value <x> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the isf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of isf at <x>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='isf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='isf', **kwargs)
def ___get_property(self, x, prop, **kwargs):
"""
Estimates property (pdf, cdf, ppf, etc.) at value <x> using the fitted distribution parameters.
Parameters
----------
x : float or iterable
Value at which the property is estimated.
prop : str
Scipy property to be estimated (pdf, ppf, isf, cdf, rvs, etc.).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either estimate or array of estimates of property at <x>
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
distribution_object = getattr(scipy.stats, self.distribution_name)
property_function = getattr(distribution_object, prop)
if not np.isscalar(x):
x = np.array(x)
if self.fit_method == 'MLE':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return property_function(x, *self.fit_parameters)
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
estimate_method = kwargs.pop('estimate_method', 'parameter mode')
if estimate_method not in ['parameter mode', 'value mode', 'value quantile']:
raise ValueError(f'Estimate method <{estimate_method}> not recognized')
if estimate_method in ['parameter mode', 'value mode']:
kernel_steps = kwargs.pop('kernel_steps', 1000)
else:
kernel_steps = None
if estimate_method == 'value quantile':
quantile = kwargs.pop('quantile', .5)
else:
quantile = None
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Estimate mode of each parameter as peaks of gaussian kernel.
# Use estimated parameters to calculate property function
if estimate_method == 'parameter mode':
parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
return property_function(x, *parameters)
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
property_samples = np.array([property_function(x, *_theta) for _theta in samples])
# Estimate property function as mode of distribution of property value
# for all samples in self.mcmc_chain as peaks of gaussian kernel.
if estimate_method == 'value mode':
if np.isscalar(x):
if np.all(np.isnan(property_samples)):
return np.nan
else:
ps_filtered = property_samples[~np.isnan(property_samples)]
if np.all(ps_filtered == ps_filtered[0]):
return np.nan
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
return support[density.argmax()]
else:
estimates = []
for ps in property_samples.T:
if np.all(np.isnan(ps)):
estimates.append(np.nan)
else:
ps_filtered = ps[~np.isnan(ps)]
if np.all(ps_filtered == ps_filtered[0]):
estimates.append(np.nan)
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
estimates.append(support[density.argmax()])
return np.array(estimates)
# Estimate property function as quantile of distribution of property value
# for all samples in self.mcmc_chain.
elif estimate_method == 'value quantile':
if np.isscalar(quantile):
if quantile <= 0 or quantile > 1:
raise ValueError(f'Quantile must be in range (0,1], quantile={quantile} was passed')
else:
raise ValueError(f'Quantile must be scalar, {type(quantile)} was passed')
if np.isscalar(x):
return np.nanquantile(a=property_samples, q=quantile)
else:
return np.array(
[
np.nanquantile(a=row, q=quantile) for row in property_samples.T
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
    def plot_summary(self, support=None, bins=10, plotting_position='Weibull', **kwargs):
        """
        Plots projected return values, pdf, and cdf values against observed.
        Parameters
        ----------
        support : array_like, optional
            Values used to estimate pdf and cdf. By default is 100 linearly spaced min to max extreme values.
        bins : int, optional
            Number of bins used to plot cdf and pdf histograms (default=10).
        plotting_position : str, optional
            Plotting position (default='Weibull'). Has no effect on return value inference,
            affects only some goodness of fit statistics and locations of observed extremes on the
            return values plot.
        kwargs
            if fit is MCMC:
                rv_kwargs : dict
                    burn_in : int
                        Number of samples to discard. Samples, before the series converges, should be discarded.
                    estimate_method : str, optional
                        'parameter mode' (default) - calculates value for parameters
                            estimated as mode (histogram peak, through gaussian kernel)
                        'value mode' - calculates values for each sample and then determines
                            value estimate as mode (histogram peak, through gaussian kernel)
                        'value quantile' - calculates values for each sample and then determines
                            value estimate as quantile of the value distribution
                    kernel_steps : int, optional
                        Number of bins (kernel support points) to determine mode (default=1000).
                        Only for 'parameter mode' and 'value mode' methods.
                    quantile : float, optional
                        Quantile for 'value quantile' method (default=.5, aka median).
                        Must be in the range (0, 1].
        Returns
        -------
        tuple(fig, ax1, ax2, ax3)
            Figure, return value, pdf, cdf axes.
        """
        # Make sure fit method was executed and fit data was generated
        if not self.__status['results']:
            raise UnboundLocalError('No data found. Generate results by runing self.generate_results() method first')
        # Default pdf/cdf support: 100 linearly spaced points spanning the observed extremes
        if support is None:
            support = np.linspace(
                self.extremes[self.column].values.min(), self.extremes[self.column].values.max(), 100
            )
        # MCMC fits require explicit rv_kwargs (KeyError if missing); MLE needs none
        if self.fit_method == 'MCMC':
            rv_kwargs = kwargs.pop('rv_kwargs')
        else:
            rv_kwargs = {}
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        # Empirical return period of each observed extreme (per chosen plotting position)
        return_period = self.__get_return_period(plotting_position=plotting_position)
        with plt.style.context('bmh'):
            # Setup canvas: return value plot on top, pdf and cdf plots below
            fig = plt.figure(figsize=(12, 8))
            ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
            ax2 = plt.subplot2grid((2, 2), (1, 0))
            ax3 = plt.subplot2grid((2, 2), (1, 1))
            # Plot return values
            ax1.set_title('Return Value Plot')
            ax1.set_ylabel(f'{self.column}')
            ax1.set_xlabel(f'Return period')
            ax1.plot(
                self.results.index, self.results['Return Value'].values,
                color='k', lw=2, zorder=15, label='Central estimate'
            )
            # 3 columns means confidence intervals were generated (Return Value + CI Lower + CI Upper)
            if len(self.results.columns) == 3:
                ax1.plot(
                    self.results.index, self.results[self.results.columns[1]].values,
                    ls='--', color='k', lw=.5, zorder=10
                )
                ax1.plot(
                    self.results.index, self.results[self.results.columns[2]].values,
                    ls='--', color='k', lw=.5, zorder=10
                )
                ax1.fill_between(
                    self.results.index, self.results[self.results.columns[1]],
                    self.results[self.results.columns[2]],
                    alpha=.1, color='k',
                    label=f'{self.results.columns[1].split("%")[0]}% confidence interval', zorder=5
                )
            points = ax1.scatter(
                return_period, self.extremes[self.column].values,
                edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=15,
                label=f'Observed extreme event\n{plotting_position} plotting position'
            )
            ax1.semilogx()
            # NOTE(review): matplotlib renamed grid's <b> parameter to <visible> in 3.5 and
            # later removed it - this call breaks on newer matplotlib; confirm pinned version
            ax1.grid(b=True, which='minor', axis='x')
            ax1.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%.0f'))
            ax1.legend()
            # Hidden annotation + highlight marker, revealed when hovering over an observed point;
            # initial coordinates are placeholders, repositioned in update_annot below
            annot = ax1.annotate(
                "", xy=(self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean()),
                xytext=(10, 10), textcoords="offset points",
                bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
                zorder=30
            )
            point = ax1.scatter(
                self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean(),
                edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
            )
            point.set_visible(False)
            annot.set_visible(False)
            # Move the annotation/highlight marker to the hovered scatter point and fill in its info
            def update_annot(ind):
                n = ind['ind'][0]
                pos = points.get_offsets()[n]
                annot.xy = pos
                point.set_offsets(pos)
                text = str(
                    f'Date : {self.extremes.index[n]}\n'
                    f'Value : {self.extremes[self.column].values[n]:.2f}\n'
                    f'Return Period : {return_period[n]:.2f}'
                )
                annot.set_text(text)
            # Toggle annotation visibility on mouse motion over the return value axes
            def hover(event):
                vis = annot.get_visible()
                if event.inaxes == ax1:
                    cont, ind = points.contains(event)
                    if cont:
                        update_annot(ind)
                        annot.set_visible(True)
                        point.set_visible(True)
                        fig.canvas.draw_idle()
                    else:
                        if vis:
                            annot.set_visible(False)
                            point.set_visible(False)
                            fig.canvas.draw_idle()
            fig.canvas.mpl_connect('motion_notify_event', hover)
            # Plot PDF: observed histogram (filled + outlined) vs fitted density curve
            ax2.set_ylabel('Probability density')
            ax2.set_xlabel(f'{self.column}')
            ax2.hist(
                self.extremes[self.column].values, bins=bins, density=True,
                color='k', rwidth=.9, alpha=0.2, zorder=5
            )
            ax2.hist(
                self.extremes[self.column].values, bins=bins, density=True,
                color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
            )
            ax2.plot(
                support, self.pdf(support, **rv_kwargs),
                color='k', lw=2, zorder=15
            )
            # Rug of observed extremes along the x axis
            ax2.scatter(
                self.extremes[self.column].values, [0] * len(self.extremes),
                edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
            )
            # Bottom limit only - top limit stays auto-scaled
            ax2.set_ylim(0)
            # Plot CDF
            ax3.set_ylabel('Cumulative probability')
            ax3.set_xlabel(f'{self.column}')
            if self.extremes_type == 'high':
                ax3.hist(
                    self.extremes[self.column], bins=bins, density=True, cumulative=True,
                    color='k', rwidth=.9, alpha=0.2, zorder=5
                )
                ax3.hist(
                    self.extremes[self.column], bins=bins, density=True, cumulative=True,
                    color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
                )
            else:
                # For extremes_type='low' build the cumulative histogram manually: each bar is
                # the fraction of observed extremes at or above the bin's left edge
                _, boundaries = np.histogram(self.extremes[self.column].values, bins)
                centers = np.array([(boundaries[i] + boundaries[i - 1]) / 2 for i in range(1, len(boundaries))])
                densities = []
                for i, c in enumerate(centers):
                    mask = self.extremes[self.column].values >= boundaries[i]
                    densities.append(np.sum(mask) / len(self.extremes))
                ax3.bar(
                    centers, densities, width=.9*(boundaries[1]-boundaries[0]),
                    color='k', alpha=0.2, zorder=5
                )
                ax3.bar(
                    centers, densities, width=.9*(boundaries[1]-boundaries[0]),
                    color='k', edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
                )
            ax3.plot(
                support, self.cdf(support, **rv_kwargs),
                color='k', lw=2, zorder=15
            )
            # Rug of observed extremes along the x axis
            ax3.scatter(
                self.extremes[self.column].values, [0] * len(self.extremes),
                edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
            )
            ax3.set_ylim(0)
            fig.tight_layout()
            return fig, ax1, ax2, ax3
def plot_qq(self, k, plot=True, plotting_position='Weibull', quantiles=True, **kwargs):
"""
Plots theoretical quantiles (probabilites) agains observed quantiles (probabilites).
Parameters
----------
k : int
Number of estimated (non-fixed) parameters in the distribution.
plot : bool, optional
Generates plot if True, returns data if False (default=True).
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
quantiles : bool, optional
If True, produces a quantile plot (Q-Q, ppf) (default=True).
If False, produces a probability plot (P-P, cdf).
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False :
tuple((theoretical, observed), (r, p))
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
ecdf = self.__get_return_period(plotting_position=plotting_position, return_cdf=True)
return_periods = self.__get_return_period(plotting_position=plotting_position)
# Estimate theoretical values based on returned quantiles
if quantiles:
theoretical = self.ppf(ecdf, **rv_kwargs)
else:
theoretical = self.cdf(self.extremes[self.column].values, **rv_kwargs)
theoretical[np.isinf(theoretical)] = np.nan
mask = ~np.isnan(theoretical)
if quantiles:
r, p = scipy.stats.pearsonr(self.extremes[self.column].values[mask], theoretical[mask])
else:
r, p = scipy.stats.pearsonr(ecdf, theoretical[mask])
r = np.sqrt(
1 - (1 - r ** 2) * (len(theoretical[mask]) - 1) / (len(theoretical[mask]) - (k + 1))
)
if plot:
with plt.style.context('bmh'):
# Quantile plot
if quantiles:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, self.extremes[self.column].values,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Quantile Plot')
plt.xlabel(r'Theoretical quantiles')
plt.ylabel(rf'Observed quantiles, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0]+self.threshold, self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
# Probability plot
else:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, ecdf,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Probability Plot')
plt.xlabel(r'Theoretical probabilities')
plt.ylabel(rf'Observed probabilities, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0], self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
if quantiles:
return (
(theoretical, self.extremes[self.column].values),
(r, p)
)
else:
return (
(theoretical, ecdf),
(r, p)
)
def goodness_of_fit(self, method, **kwargs):
    """
    Calculates various goodness-of-fit statistics for selected model.

    Parameters
    ----------
    method : str
        Goodness of fit statistic method.
        Supported methods:
            'AIC' - Akaike information criterion
                Lower value corresponds to a better fit.
                see https://en.wikipedia.org/wiki/Akaike_information_criterion
            'log-likelihood' - log-likelihood
                Higher value corresponds to a better fit.
            'KS' - Kolmogorov-Smirnov test
                Null hypothesis - both samples come from the same distribution.
                If p<0.05 => reject Null hypothesis with p-level of confidence.
                see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html
            'chi-square' - Chi-Square test
                Null hypothesis - both samples come from the same distribution.
                Calculates theoretical counts for given quantile ranges and compares to theoretical.
                If p<0.05 => reject Null hypothesis with p-level of confidence.
                see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
    kwargs
        if fit is MCMC
            burn_in : int
                Number of samples to discard. Samples, before the series converges, should be discarded.
            kernel_steps : int, optional
                Number of bins (kernel support points) to determine mode (default=1000).
        for AIC
            order : int, optional
                Order of AIC (1 for regular, 2 for small samples) (default=2).
            k : int
                Number of parameters estimated by the model (fixed parameters don't count)
        for KS
            mode : str, optional
                See scipy docs (default='approx').
            alternative : str, optional
                See scipy docs (default='two-sided').
        for chi-square
            chi_quantiles : int, optional
                Number of equal slices (quantiles) into which observed data is split
                to calculate the statistic (default=4).
            k : int
                Number of parameters estimated by the model (fixed parameters don't count)

    Returns
    -------
    if method = 'log-likelihood' : float, log-likelihood
    if method = 'AIC' : float, AIC statistic
    if method = 'KS' : tuple(statistic, p-value)
    if method = 'chi-square' : tuple(statistic, p-value)
    """
    # Make sure fit method was executed and fit data was generated
    if not self.__status['fit']:
        raise ValueError('No fit information found. Run self.fit() method first')

    # MLE fits store point estimates directly; MCMC fits are summarized
    # by the mode of a kernel density estimate over the posterior samples.
    if self.fit_method == 'MLE':
        fit_parameters = self.fit_parameters
    elif self.fit_method == 'MCMC':
        burn_in = kwargs.pop('burn_in')
        kernel_steps = kwargs.pop('kernel_steps', 1000)
        fit_parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
    else:
        raise RuntimeError(f'Unexpected fit_method {self.fit_method}')

    distribution_object = getattr(scipy.stats, self.distribution_name)
    # The fitted distribution describes exceedances over the threshold,
    # not raw values.
    exceedances = self.extremes[self.column].values - self.threshold
    # Flip exceedances around 0
    if self.extremes_type == 'low':
        exceedances *= -1
    log_likelihood = np.sum(
        distribution_object.logpdf(exceedances, *fit_parameters)
    )

    if method == 'log-likelihood':
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        return log_likelihood
    elif method == 'AIC':
        order = kwargs.pop('order', 2)
        k = kwargs.pop('k')
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        aic = 2 * k - 2 * log_likelihood
        if order == 1:
            return aic
        elif order == 2:
            # Second-order AIC (AICc) adds a small-sample correction term
            return aic + (2 * k ** 2 + 2 * k) / (len(self.extremes) - k - 1)
        else:
            raise ValueError(f'order must be 1 or 2, {order} was passed')
    elif method == 'KS':
        mode = kwargs.pop('mode', 'approx')
        alternative = kwargs.pop('alternative', 'two-sided')
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        exceedances = self.extremes[self.column].values - self.threshold
        if self.extremes_type == 'low':
            exceedances *= -1
        ks, p = scipy.stats.kstest(
            rvs=exceedances, cdf=distribution_object.cdf, args=fit_parameters,
            alternative=alternative, mode=mode
        )
        return ks, p
    elif method == 'chi-square':
        chi_quantiles = kwargs.pop('chi_quantiles', 4)
        k = kwargs.pop('k')
        assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
        # Quantile edges 0, 1/q, 2/q, ..., 1 (q+1 values)
        chi_quantile_ranges = [1 / chi_quantiles * (i + 1) for i in np.arange(-1, chi_quantiles)]
        observed_counts, expected_counts = [], []
        for i in range(chi_quantiles):
            bot = np.nanquantile(
                self.extremes[self.column].values,
                chi_quantile_ranges[i]
            )
            top = np.nanquantile(
                self.extremes[self.column].values,
                chi_quantile_ranges[i + 1]
            )
            # The last bin is closed on the right so the sample maximum is counted
            if i + 1 == chi_quantiles:
                observed_counts.append(
                    len(
                        self.extremes[
                            (self.extremes[self.column] >= bot)
                            & (self.extremes[self.column] <= top)
                        ]
                    )
                )
            else:
                observed_counts.append(
                    len(
                        self.extremes[
                            (self.extremes[self.column] >= bot)
                            & (self.extremes[self.column] < top)
                        ]
                    )
                )
            expected_counts.append(
                len(self.extremes) * (self.cdf(top) - self.cdf(bot))
            )
        # The chi-square approximation is unreliable for sparsely populated bins
        if min(observed_counts) <= 5 or min(expected_counts) <= 5:
            raise ValueError(f'Too few observations in observed counts {min(observed_counts)} '
                             f'or expected counts {min(expected_counts):.0f}, reduce chi_quantiles')
        cs, p = scipy.stats.chisquare(f_obs=observed_counts, f_exp=expected_counts, ddof=k)
        return cs, p
    else:
        raise ValueError(f'Method {method} not recognized')
if __name__ == "__main__":
    # Demonstration driver: run a full extreme value analysis on sample data.
    import os

    # Load data and initialize EVA.
    # BUGFIX: build the path from components instead of the hard-coded Windows
    # separator in r'test data\Battery_residuals.csv' so the script also runs
    # on POSIX systems.
    df = pd.read_csv(
        os.path.join(os.getcwd(), 'test data', 'Battery_residuals.csv'),
        index_col=0, parse_dates=True
    )
    # Renamed from the misleading module-level variable name 'self'.
    eva = EVA(dataframe=df, column='Residuals (ft)', block_size=365.25, gap_length=24)

    # Set up test parameters
    etype = 'high'            # extremes type: 'high' or 'low'
    extremes_method = 'POT'   # 'POT' (peaks over threshold) or 'BM' (block maxima)
    _method = 'MCMC'          # fit method: 'MLE' or 'MCMC'
    mle_ci = 'Delta'          # confidence interval method when fitting with MLE

    if extremes_method == 'POT':
        _distribution = 'genpareto'
    elif extremes_method == 'BM':
        _distribution = 'genextreme'
    else:
        raise RuntimeError

    # Run a series of methods to assist in finding optimal threshold
    if extremes_method == 'POT':
        if etype == 'high':
            eva.plot_mean_residual_life(
                thresholds=np.arange(2, 8, .01), r=24*7, alpha=.95,
                adjust_threshold=True, limit=10, extremes_type='high'
            )
            eva.plot_parameter_stability(
                thresholds=np.arange(3, 8, .05), r=24*7, alpha=.95,
                adjust_threshold=True, limit=10, extremes_type='high'
            )
        elif etype == 'low':
            eva.plot_mean_residual_life(
                thresholds=np.arange(-8, -2, .01), r=24*7, alpha=.95,
                adjust_threshold=True, limit=10, extremes_type='low'
            )
            eva.plot_parameter_stability(
                thresholds=np.arange(-8, -2.5, .05), r=24*7, alpha=.95,
                adjust_threshold=True, limit=20, extremes_type='low'
            )

    # Extract extreme values
    if extremes_method == 'BM':
        eva.get_extremes(method='BM', plotting_position='Weibull', extremes_type=etype)
    elif extremes_method == 'POT':
        if etype == 'high':
            eva.get_extremes(method='POT', threshold=3, r=24*7, plotting_position='Weibull', extremes_type='high')
        elif etype == 'low':
            eva.get_extremes(method='POT', threshold=-2.8, r=24*7, plotting_position='Weibull', extremes_type='low')
    eva.plot_extremes()

    # Test independence of POT extremes
    if extremes_method == 'POT':
        eva.test_extremes(method='autocorrelation')
        eva.test_extremes(method='lag plot', lag=1)
        print(eva.test_extremes(method='runs test', alpha=0.05))

    # Fit distribution
    if _method == 'MLE':
        if _distribution == 'genpareto':
            # Shape (f0) and location (floc) are both 0 => equivalent to exponential distribution (expon with floc=0)
            eva.fit(distribution_name=_distribution, fit_method='MLE', scipy_fit_options=dict(floc=0))
        elif _distribution == 'genextreme':
            eva.fit(distribution_name=_distribution, fit_method='MLE')
    elif _method == 'MCMC':
        eva.fit(
            distribution_name=_distribution, fit_method='MCMC',
            nsamples=1000, nwalkers=200, starting_bubble=.01
        )
        # Trace plot of the MCMC chains (only meaningful for MCMC fits)
        if _distribution == 'genpareto':
            fig_trace, axes_trace = eva.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\sigma$'])
        elif _distribution == 'genextreme':
            fig_trace, axes_trace = eva.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'])
        # Corner plot of the posterior samples
        if _distribution == 'genpareto':
            fig_corner = eva.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\sigma$'], smooth=1)
        elif _distribution == 'genextreme':
            fig_corner = eva.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'], smooth=1)

    # Test quality of fit
    if _method == 'MLE':
        print(eva.goodness_of_fit(method='AIC', k=1))
        eva.plot_qq(k=2, plotting_position='Weibull', quantiles=True)
        eva.plot_qq(k=2, plotting_position='Weibull', quantiles=False)
    else:
        _burn_in = 200
        print(eva.goodness_of_fit(method='AIC', k=2, burn_in=_burn_in, kernel_steps=100))
        eva.plot_qq(
            k=2, plotting_position='Weibull', quantiles=True,
            rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
        )
        eva.plot_qq(
            k=2, plotting_position='Weibull', quantiles=False,
            rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
        )

    # Generate results
    if _method == 'MCMC':
        _burn_in = 200
        eva.generate_results(
            alpha=.95,
            rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100),
            ci_kwargs=dict(burn_in=_burn_in)
        )
    elif _method == 'MLE':
        if mle_ci == 'Monte Carlo':
            eva.generate_results(
                alpha=.95,
                ci_kwargs=dict(
                    method='Monte Carlo', k=100, source='data', sampling_method='constant', assume_normality=False
                )
            )
        elif mle_ci == 'Delta':
            # NOTE(review): dx is passed as the string '1e-10' — verify the
            # Delta method expects a string rather than a float here.
            eva.generate_results(alpha=.95, ci_kwargs=dict(method='Delta', dx='1e-10', precision=100))

    # Plot extremes return plot
    if _method == 'MCMC':
        _burn_in = 200
        eva.plot_summary(
            bins=10, plotting_position='Gringorten',
            rv_kwargs=dict(burn_in=200, estimate_method='parameter mode', kernel_steps=100)
        )
    elif _method == 'MLE':
        eva.plot_summary(bins=10, plotting_position='Gringorten')
|
georgebv/coastlib
|
coastlib/stats/extreme.py
|
Python
|
gpl-3.0
| 165,750
|
[
"Gaussian"
] |
6bff41fdbc981d317beda312a6d7cab2d018d85709c29ffe7560f4cee2453711
|
"""testCount3.py.
Written by: Brian O'Dell, October 2017
A program to run each program a 500 times per thread count.
Then uses the data collected to make graphs and tables that
are useful to evaluate the programs running time.
"""
from subprocess import *
import subprocess
from numba import jit
import numpy as np
import csv as csv
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt
@jit
def doCount(name):
    """Do multiple executions of the program.

    Call the program multiple times with variable arguments to gather timing
    data. ``name`` is the command used to launch the executable (e.g.
    "java Count3") and must exist before running. Each thread count in
    {512, 1024} is run 500 times with the -w (write) and -t (threads) flags.
    """
    j = 512
    while (j < 1025):
        # BUGFIX: range(0, 501) executed 501 runs per thread count; the module
        # docstring (and the companion analysis) expect 500.
        for _ in range(500):
            # NOTE(review): shell=True with a string command; safe only because
            # 'name' is supplied internally, not from untrusted input.
            p = subprocess.Popen(name + " -w -t " + str(j), shell=True,
                                 stdout=subprocess.PIPE)
            print(str(p.stdout.read()))
        # j starts at 512, so the original dead "if j == 0: j = 1" branch was
        # removed; simply double the thread count each pass.
        j = 2 * j
@jit
def exportData(name):
    """Turn the data into something meaningful.

    Reads <name>.csv, computes the mean and standard deviation of the
    execution time for each thread count, plots them as a bar chart saved to
    <name>Graph.png, and writes the summary table to <name>Table.csv.
    """
    DF = pd.read_csv(name + ".csv")
    f = {'ExecTime': ['mean', 'std']}
    # group by the number of threads in the csv and
    # apply the mean and standard deviation functions to the groups
    avgDF = DF.groupby('NumThreads').agg(f)
    avgTable = DF.groupby('NumThreads', as_index=False).agg(f)
    # When the data csv was saved we used 0 to indicate serial execution
    # this was so the rows would be in numerical order instead of Alphabetical
    # Now rename index 0 to Serial to be an accurate representation
    indexList = avgDF.index.tolist()
    indexList[0] = 'Serial'
    avgDF.index = indexList
    # make the bar chart and set the axes.
    # BUGFIX: the original passed legend='False' — a truthy *string*, not the
    # boolean False. The legend labels are set explicitly below anyway.
    avgPlot = avgDF.plot(kind='bar', title=('Run Times Using ' + name),
                         legend=False, figsize=(15, 8))
    avgPlot.set_xlabel("Number of Threads")
    avgPlot.set_ylabel("Run Time (seconds)")
    avgPlot.legend(['mean', 'std deviation'])
    # put the data values on top of the bars for clarity
    for p in avgPlot.patches:
        avgPlot.annotate((str(p.get_height())[:6]),
                         (p.get_x() - .01, p.get_height()), fontsize=9)
    # save the files we need
    plt.savefig(name + 'Graph.png')
    avgTable.to_csv(name + 'Table.csv', index=False, encoding='utf-8')
def main():
    """Run the analysis/export step (raw data collection is disabled)."""
    # Re-enable to regenerate the raw timing data first:
    # doCount("java Count3")
    exportData("javaRuntimes")


if __name__ == '__main__':
    main()
|
brian-o/CS-CourseWork
|
CS491/count3Java/testCount3java.py
|
Python
|
gpl-3.0
| 2,658
|
[
"Brian"
] |
1f95f2e071a7bf4e6af00478f00783753aaa19d83e500a050f73ee994d60b9e0
|
#!/usr/bin/env python2.7
import pysam, argparse, sys
parser = argparse.ArgumentParser(description='Helps with genome curation using linked paired-end read information from subsetted regions of a genome.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-b', help= 'indexed bam file required for random read access', type=str, required=True)
#required.add_argument('-s', help= 'QNAME sorted SAM file required for mate pair lookups', type=str, required=True)
required.add_argument('-f', help= 'indexed fasta file required for random sequence access', type=str, required=True)
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', '--help', action="help", help="show this help message and exit")
optional.add_argument('-c', help= 'contig/scaffold for read detection', type=str)
optional.add_argument('--min', help= 'minimum on scaffold', type=int, default=0)
optional.add_argument('--max', help= 'maximum on scaffold', type=int, default=sys.maxint)
optional.add_argument('--base', help= 'N-base for coordinate access', type=int, choices= [0,1], default=0)
optional.add_argument('--connectivity', help= 'outputs reads that should span multiple scaffolds and the regions they occur', action="store_false")
optional.add_argument('--threshold', help= 'minimum number of reads bridging scaffolds required to print', type=int, default=0)
args = parser.parse_args()
args.min, args.max= args.min-args.base, args.max-args.base
bamfile = pysam.AlignmentFile(args.b, "rb")
fastafile= pysam.FastaFile(args.f)
sys.stderr.write("Reading BAM file into mate-based dictionary...\t")
read_dict= {}
for read in bamfile:
read_dict[read.query_name + ("-1" if read.is_read1 else "-2")]= read
sys.stderr.write("finished!\n")
sys.stderr.write("Piling BAM file...\t")
piled_columns = bamfile.pileup(args.c, args.min, (args.max if args.max != sys.maxint else fastafile.get_reference_length(args.c)))
sys.stderr.write("finished!\n")
if args.connectivity:
for piled_column in piled_columns:
if piled_column.reference_pos < args.min or piled_column.reference_pos > args.max:
continue
if piled_column.reference_pos % 1000 == 0:
sys.stderr.write("Processing column {0}\n".format(str(piled_column.reference_pos)))
if args.threshold == 0:
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
for piled_read in piled_column.pileups:
seg= piled_read.alignment
mate= read_dict[seg.query_name + ("-2" if read.is_read1 else "-1")]
print "\t".join([
seg.query_name,
seg.query_sequence[piled_read.query_position],
seg.cigarstring,
mate.query_name,
("unmapped" if mate.is_unmapped else bamfile.getrname(mate.reference_id)),
("NA" if mate.is_unmapped else str(mate.reference_start)),
("NA" if mate.is_unmapped else mate.cigarstring)
])
continue
bridge_count= 0
segs= []
mates= []
for piled_read in piled_column.pileups:
seg= piled_read.alignment
if seg.is_proper_pair:
continue
mate= read_dict[seg.query_name + ("-2" if read.is_read1 else "-1")]
if bamfile.getrname(mate.reference_id) != bamfile.getrname(seg.reference_id):
bridge_count+= 1
segs.append(seg), mates.append(mate)
if bridge_count >= args.threshold:
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
print "READ\tBASE_CALL\tMAPPED?"
for seg, mate in zip(segs, mates):
print "\t".join([
seg.query_name,
seg.query_sequence[piled_read.query_position],
seg.cigarstring,
mate.query_name,
("unmapped" if mate.is_unmapped else bamfile.getrname(mate.reference_id)),
("NA" if mate.is_unmapped else str(mate.reference_start)),
("NA" if mate.is_unmapped else mate.cigarstring)
])
"""
for piled_column in piled_columns:
if piled_column.reference_pos < args.min or piled_column.reference_pos > args.max:
continue
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
print "READ\tBASE_CALL\tMAPPED?"
for piled_read in piled_column.pileups:
seg= piled_read.alignment
pos = bamfile.tell()
try:
mate= bamfile.mate(seg)
except ValueError:
1
finally:
bamfile.seek(pos)
print "{0}\t{1}\t{2}".format(
seg.query_name,
seg.query_sequence[piled_read.query_position],
("unmapped" if seg.mate_is_unmapped else "{0} is on {1}".format(mate.query_name, bamfile.getrname(mate.reference_id))))
print "\n\n\n"
"""
|
alexherns/biotite-scripts
|
pysam_mismatched.py
|
Python
|
mit
| 5,255
|
[
"pysam"
] |
ad67106210322f2e08543e91a9dc7ea71fd5c5577327e546426287f3ef148a1c
|
import os
import botocore
import ast
import pytest
ROOTDIR = os.path.dirname(botocore.__file__)
def _all_files():
    """Yield the path of every non-vendored .py file under the package root."""
    for dirpath, subdirs, files in os.walk(ROOTDIR):
        if 'vendored' in subdirs:
            # Vendored packages are third-party code; don't lint them.
            subdirs.remove('vendored')
        for fname in files:
            if fname.endswith('.py'):
                yield os.path.join(dirpath, fname)
@pytest.mark.parametrize("filename", _all_files())
def test_no_bare_six_imports(filename):
    """Fail if *filename* imports six directly instead of botocore.compat."""
    with open(filename) as handle:
        source = handle.read()
    tree = ast.parse(source, filename)
    SixImportChecker(filename).visit(tree)
class SixImportChecker(ast.NodeVisitor):
    """AST visitor that raises AssertionError on any direct import of six."""

    def __init__(self, filename):
        self.filename = filename

    def visit_Import(self, node):
        # Catches "import six" (including "import six as ...").
        offenders = [a for a in node.names if getattr(a, 'name', '') == 'six']
        if offenders:
            line = self._get_line_content(self.filename, node.lineno)
            raise AssertionError(
                "A bare 'import six' was found in %s:\n"
                "\n%s: %s\n"
                "Please use 'from botocore.compat import six' instead" %
                (self.filename, node.lineno, line))

    def visit_ImportFrom(self, node):
        # Catches "from six import ...".
        if node.module == 'six':
            line = self._get_line_content(self.filename, node.lineno)
            raise AssertionError(
                "A bare 'from six import ...' was found in %s:\n"
                "\n%s:%s\n"
                "Please use 'from botocore.compat import six' instead" %
                (self.filename, node.lineno, line))

    def _get_line_content(self, filename, lineno):
        # Return the offending source line (1-indexed) for the error message.
        with open(filename) as source:
            return source.readlines()[lineno - 1]
|
boto/botocore
|
tests/functional/test_six_imports.py
|
Python
|
apache-2.0
| 1,840
|
[
"VisIt"
] |
a49560abb5dae0877f089f1eb035712c11588b7aeac6662b8d1e68f9b43b4d9c
|
# FIXME: to bring back to life
from __future__ import print_function
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
import cmd
import sys
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.AbstractFileSystem import DFCFileSystem
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.DirectoryCompletion import DirectoryCompletion
class DirCompletion(cmd.Cmd):
    """Interactive shell for browsing the DIRAC File Catalog with
    tab-completion of directory paths."""

    # NOTE: these clients are created at class-definition (import) time.
    fc = FileCatalogClient()
    dfc_fs = DFCFileSystem(fc)
    dc = DirectoryCompletion(dfc_fs)

    def do_exit(self, args):
        """Exit the shell."""
        sys.exit(0)

    def _listdir(self, args):
        # A directory expands to its entries; anything else echoes back.
        if self.dfc_fs.is_dir(args):
            return self.dfc_fs.list_dir(args)
        else:
            return [args]

    def _ls(self, args):
        try:
            return self._listdir(args)
        # BUGFIX: the original bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary errors.
        except Exception:
            return []

    def do_ls(self, args):
        """List the given catalog path."""
        print()
        print(" ".join(self._ls(args)))

    def complete_ls(self, text, line, begidx, endidx):
        """Tab-completion hook for the ``ls`` command."""
        result = []
        cur_input_line = line.split()
        # Default to the catalog root; use the typed path when one is present.
        cur_path = "/"
        if (len(cur_input_line) == 2):
            cur_path = cur_input_line[1]
        result = self.dc.parse_text_line(text, cur_path, "/")
        return result
if __name__ == "__main__":
    # Launch the interactive catalog-completion shell.
    DirCompletion().cmdloop()
|
fstagni/DIRAC
|
tests/Integration/DataManagementSystem/FIXME_dfc_dir_completion.py
|
Python
|
gpl-3.0
| 1,254
|
[
"DIRAC"
] |
a58212fc0a3483ce6a61626b086ed228847a3e07ee4a22ec073088f1d8374af4
|
#TODO: Set dbkey to proper UCSC build, if known
import urllib
from galaxy import datatypes, config
import tempfile, shutil
def exec_before_job(app, inp_data, out_data, param_dict, tool=None):
    """Sets the name of the data"""
    data_name = param_dict.get('name', 'HbVar query')
    data_type = param_dict.get('type', 'txt')
    # All data is TSV, so treat generic text as interval data.
    if data_type == 'txt':
        data_type = 'interval'
    # Python 2: items() returns a list, so [0] picks the single output dataset.
    name, data = out_data.items()[0]
    data = app.datatypes_registry.change_datatype(data, data_type)
    data.name = data_name
    out_data[name] = data
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Verifies the data after the run.

    Fetches the exported query results from the URL the datasource sent
    back, writes them (capped at ~100 MB) into the output dataset, and
    normalizes the dataset's metadata/type. Python 2 only (old-style
    except syntax, urllib.urlopen).
    """
    URL = param_dict.get( 'URL', None )
    # Ask HbVar for a plain export instead of the interactive page
    URL = URL + '&_export=1&GALAXY_URL=0'
    if not URL:
        raise Exception('Datasource has not sent back a URL parameter')
    CHUNK_SIZE = 2**20 # 1Mb
    MAX_SIZE = CHUNK_SIZE * 100  # hard cap on the download: 100 MB
    try:
        page = urllib.urlopen(URL)
    except Exception, exc:
        raise Exception('Problems connecting to %s (%s)' % (URL, exc) )
    # Single output dataset (Python 2: items() returns a list)
    name, data = out_data.items()[0]
    fp = open(data.file_name, 'wb')
    size = 0
    # Stream the response to disk in 1 MB chunks
    while 1:
        chunk = page.read(CHUNK_SIZE)
        if not chunk:
            break
        if size > MAX_SIZE:
            raise Exception('----- maximum datasize exceeded ---')
        size += len(chunk)
        fp.write(chunk)
    fp.close()
    #Set meta data, format file to be valid interval type
    if isinstance(data.datatype, datatypes.interval.Interval):
        data.set_meta(first_line_is_header=True)
        #check for missing meta data, if all there, comment first line and process file
        if not data.missing_meta():
            line_ctr = -1
            # Rewrite the file through a temporary copy
            temp = tempfile.NamedTemporaryFile('w')
            temp_filename = temp.name
            temp.close()
            temp = open(temp_filename,'w')
            # 1-based metadata columns -> 0-based indices
            # NOTE(review): these indices and line_ctr are computed but never
            # used below; the loop currently copies lines through unchanged.
            chromCol = int(data.metadata.chromCol) - 1
            startCol = int(data.metadata.startCol) - 1
            strandCol = int(data.metadata.strandCol) - 1
            for line in open(data.file_name, 'r'):
                line_ctr += 1
                fields = line.strip().split('\t')
                temp.write("%s\n" % '\t'.join(fields))
            temp.close()
            shutil.move(temp_filename,data.file_name)
        else:
            # Required interval metadata missing: fall back to plain tabular
            data = app.datatypes_registry.change_datatype(data, 'tabular')
    data.set_size()
    data.set_peek()
    data.flush()
|
dbcls/dbcls-galaxy
|
tools/data_source/hbvar_filter.py
|
Python
|
mit
| 2,585
|
[
"Galaxy"
] |
a62fb767d2f3d7dd14959ca714c985854a8144cb3de0172a8eaabfd7f388e216
|
"""
Contains Experiment and Injection classes.
"""
import os
import logging
import numpy
from pint import DimensionalityError
from bayesitc.units import ureg, Quantity
# Use logger with name of module
logger = logging.getLogger(__name__)
class Injection(object):
    """
    Data from a single injection.

    Several types of information are stored about each injection:

    * the ordinal number of the injection
    * the programmed volume of the injection
    * duration of the injection
    * time between the beginning of the injection and the beginning of the next injection
    * filtering period over which data channel is averaged to produce a single measurement of applied power

    EXAMPLES
    """
    # TODO Add docstring examples.

    def __init__(self, number, volume, duration, spacing, filter_period,
                 evolved_heat=None, titrant_amount=None, titrant_concentration=None):
        """
        :param number: sequence number of the injection
        :param volume: programmed injection volume (pint Quantity)
        :param duration: duration of the injection
        :param spacing: time between the start of this injection and the next
        :param filter_period: averaging period of the power data channel
        :param evolved_heat: measured heat of the injection; stored in microcalorie
        :param titrant_amount: quantity of titrant injected (takes precedence)
        :param titrant_concentration: syringe concentration(s) used to derive the titrant amount
        :raises TypeError: if neither titrant_amount nor titrant_concentration is given
        """
        # sequence number of injection
        self.number = number
        # programmed volume of injection
        self.volume = volume
        # duration of injection
        self.duration = duration
        # time between beginning of injection and beginning of next injection
        self.spacing = spacing
        # time over which data channel is averaged to produce a single measurement
        # of applied power
        self.filter_period = filter_period

        # If provided, set the evolved_heat, making sure the unit is compatible
        # with microcalorie.
        # BUGFIX: compare against None explicitly — a zero-magnitude Quantity
        # is falsy and was previously dropped silently.
        if evolved_heat is not None:
            self.evolved_heat = evolved_heat.to('microcalorie')

        # the quantity of compound(s) injected
        if titrant_amount is not None:
            self.titrant = titrant_amount
        elif titrant_concentration is not None:
            self.contents(titrant_concentration)
        else:
            # BUGFIX: the original constructed the TypeError but never raised
            # it, leaving the object without a titrant attribute.
            raise TypeError(
                "Need to specify either a titrant amount, or a concentration")

    def contents(self, titrant_concentration):
        """
        Define the contents of what was injected.

        Takes a list/array of concentrations (interpreted as millimole/liter)
        and computes the amount of each titrant species delivered by this
        injection (volume * concentration).
        """
        # Concentration of syringe contents
        self.titrant_concentration = Quantity(
            numpy.array(titrant_concentration), ureg.millimole / ureg.liter)
        self.titrant = Quantity(
            numpy.zeros(self.titrant_concentration.size), ureg.millimole)
        for titr in range(self.titrant_concentration.size):
            # Amount of this titrant species in the injection (mole).
            # BUGFIX: always index the concentration array; the original used
            # the *whole* array for the first element, which is only correct
            # for a single-species syringe.
            self.titrant[titr] = self.volume * self.titrant_concentration[titr]
class BaseExperiment(object):
"""
Abstract base class for an ITC experiment
"""
def __init__(self, data_source, experiment_name, instrument):
"""
Base init, prepare all the variables
:param data_source:
:type data_source: str
:param experiment_name:
:type experiment_name: str
:return:
:rtype:
"""
# Initialize.
# the source filename from which data is read
self.data_filename = None
self.instrument = instrument # the instrument that was used
self.number_of_injections = None # number of syringe injections
self.target_temperature = None # target temperature
# initial equilibration (delay) time before injections
self.equilibration_time = None
self.stir_speed = None # rate of stirring
self.reference_power = None # power applied to reference cell
# concentrations of various species in syringe
self.syringe_contents = None
# concentrations of various species in sample cell
self.sample_cell_contents = None
self.cell_volume = instrument.V0 # volume of liquid in sample cell
# list of injections (and their associated data)
self.injections = None
# time at end of filtering period
self.filter_period_end_time = None
# time at midpoint of filtering period
self.filter_period_midpoint_time = None
# "differential" power applied to sample cell
self.differential_power = None
self.cell_temperature = None # cell temperature
self.name = experiment_name
self.data_source = data_source
# Extract and store data about the experiment.
self.number_of_injections = None
self.target_temperature = None
self.equilibration_time = None
self.stir_rate = None
self.reference_power = None
# Store additional data about experiment.
self.syringe_concentration = None
# supposed concentration of receptor in cell
self.cell_concentration = None
# Allocate storage for power measurements.
self.time = None
self.heat = None
self.temperature = None
# Store data about measured heat liberated during each injection.
# time at end of filtering period (s)
self.filter_period_end_time = None
# "differential" power applied to sample cell (ucal/s)
self.differential_power = None
self.cell_temperature = None # cell temperature (K)
self.jacket_temperature = None # adiabatic jacket temperature (K)
def __str__(self):
"""
Show details of experiment in human-readable form.
"""
# TODO Clean up this definition
string = ""
string += "EXPERIMENT\n"
string += "\n"
string += "Source filename: %s\n" % self.data_filename
string += "Number of injections: %d\n" % self.number_of_injections
string += "Target temperature: %.1f K\n" % (
self.target_temperature / ureg.kelvin)
try:
string += "Equilibration time before first injection: %.1f s\n" % (
self.equilibration_time / ureg.second)
except TypeError:
string += "Equilibration time unknown"
# TODO temporary, needs to be uniform type among all experiment classes
if isinstance(self.syringe_concentration, Quantity):
string += "Syringe concentration: %.3f mM\n" % (self.syringe_concentration / (ureg.millimole / ureg.liter))
if isinstance(self.cell_concentration, Quantity):
string += "Cell concentration: %.3f mM\n" % (self.cell_concentration / (ureg.millimole / ureg.liter))
string += "Cell volume: %.3f ml\n" % (
self.cell_volume / ureg.milliliter)
if isinstance(self.cell_concentration, Quantity):
string += "Reference power: %.3f ucal/s\n" % (self.reference_power / (ureg.microcalorie / ureg.second))
string += "\n"
string += "INJECTIONS\n"
string += "\n"
string += "%16s %24s %24s %24s %24s %24s\n" % (
'injection',
'volume (uL)',
'duration (s)',
'collection time (s)',
'time step (s)',
'evolved heat (ucal)'
)
# for injection in range(self.number_of_injections):
# string += "%16d %16.3f %16.3f %16.3f %16.3f" % (injection, self.injection_volume[injection] / unit.microliter, self.injection_duration[injection] / unit.second, self.collection_time[injection] / unit.second, self.time_step[injection] / unit.second)
for injection in self.injections:
string += "%16d %24.3f %24.3f %24.3f %24.3f %24.3f\n" % (
injection.number,
injection.volume /
ureg.microliter, injection.duration / ureg.second,
injection.spacing / ureg.second, injection.filter_period /
ureg.second, injection.evolved_heat / ureg.microcalorie)
return string
def write_integrated_heats(self, filename):
"""
Write integrated heats in a format similar to that used by Origin.
"""
DeltaV = self.injections[0].volume
V0 = self.cell_volume
P0 = self.cell_concentration
Ls = self.syringe_concentration
string = "%12s %5s %12s %12s %12s %12s\n" % ("DH", "INJV", "Xt", "Mt", "XMt", "NDH")
for (n, injection) in enumerate(self.injections):
# Instantaneous injection model (perfusion)
# d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
# P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
# L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
# PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
# Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
# Ln = L/V0 - PLn; # free ligand concentration in sample cell after
# n injections (M)
Pn = 0.0 * (ureg.millimole / ureg.liter)
Ln = 0.0 * (ureg.millimole / ureg.liter)
PLn = 0.0 * (ureg.millimole / ureg.liter)
NDH = 0.0 # review Not sure what this is
# Form string.
string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
injection.evolved_heat / ureg.microcalorie, injection.volume /
ureg.microliter, Pn /
(ureg.millimole / ureg.liter), Ln /
(ureg.millimole / ureg.liter),
PLn / (ureg.millimole / ureg.liter), NDH)
# Final line.
string += " -- %12.5f %12.5f --\n" % (
Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
# Write file contents.
outfile = open(filename, 'w')
outfile.write(string)
outfile.close()
return
def write_heats_csv(self, filename):
"""
Write integrated heats in a csv format
"""
DeltaV = self.injections[0].volume
V0 = self.cell_volume
P0 = self.cell_concentration
Ls = self.syringe_concentration
string = "%12s, %5s, %12s, %12s, %12s, %12s\n" % (
"DH", "INJV", "Xt", "Mt", "XMt", "NDH")
for (n, injection) in enumerate(self.injections):
# Instantaneous injection model (perfusion)
# d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
# P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
# L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
# PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
# Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
# Ln = L/V0 - PLn; # free ligand concentration in sample cell after
# n injections (M)
Pn = 0.0 * (ureg.millimole / ureg.liter)
Ln = 0.0 * (ureg.millimole / ureg.liter)
PLn = 0.0 * (ureg.millimole / ureg.liter)
NDH = 0.0 # review Not sure what this is
# Form string.
string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
injection.evolved_heat / ureg.microcalorie, injection.volume /
ureg.microliter, Pn /
(ureg.millimole / ureg.liter), Ln /
(ureg.millimole / ureg.liter),
PLn / (ureg.millimole / ureg.liter), NDH)
# Final line.
string += " -- %12.5f %12.5f --\n" % (Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
# Write file contents.
outfile = open(filename, 'w')
outfile.write(string)
outfile.close()
return
# TODO do we want all the details, including volumes?
def read_integrated_heats(self, heats_file, unit='microcalorie'):
    """
    Read integrated heats from an origin file and attach one heat to
    each injection of this experiment.

    :param heats_file: path to the origin-format heats file
    :type heats_file: str
    :param unit: unit the heats are expressed in (default 'microcalorie')
    :type unit: str
    :raises ValueError: when the file's heat count differs from the
        declared number of injections
    """
    integrated = self._parse_heats(heats_file, unit)
    if integrated.size != self.number_of_injections:
        raise ValueError("The number of injections does not match the number of integrated heats in %s" % heats_file)
    # One heat per injection, in file order.
    for injection, heat in zip(self.injections, integrated):
        injection.evolved_heat = heat
@staticmethod
def _parse_heats(heats_file, unit):
    """
    Parse a whitespace-delimited heats file (header row, one row per
    injection, one footer line) and return the 'DH' column as a Quantity.

    :param heats_file: path to the heats file
    :type heats_file: str
    :param unit: unit to attach to the returned heats
    :type unit: str
    :return: integrated heats
    :rtype: Quantity
    """
    import pandas as pd
    assert isinstance(heats_file, str)
    # BUG FIX: pd.read_table and its 'skip_footer' keyword are deprecated
    # (and removed in modern pandas); read_csv with 'skipfooter' is the
    # supported spelling. skipfooter requires the python engine, and the
    # regex separator is a raw string to avoid an invalid-escape warning.
    dataframe = pd.read_csv(heats_file, skipfooter=1, engine='python', sep=r'\s+', header=0)
    heats = numpy.array(dataframe['DH'])
    return Quantity(heats, unit)
class ExperimentMicroCal(BaseExperiment):
    """
    Data from an ITC experiment.
    The experiment consists of several types of data:
    * the instrument that was used
    * experimental conditions (temperature, stir speed, etc.)
    * concentrations of various components in syringe and sample cell
    * injection volumes and durations, collection times
    * time record of applied power and temperature difference
    """
    # TODO Add type verification

    def __init__(self, data_filename, experiment_name, instrument):
        """
        Initialize an experiment from a Microcal VP-ITC formatted .itc file.
        ARGUMENTS
        data_filename (String) - the filename of the Microcal VP-ITC formatted .itc file to initialize the experiment from
        TODO
        * Add support for other formats of datafiles (XML, etc.).
        """
        # Initialize.
        super(ExperimentMicroCal, self).__init__(data_filename, experiment_name, instrument)
        # the source filename from which data is read
        # concentrations of various species in syringe
        self.syringe_contents = list()
        # concentrations of various species in sample cell
        self.sample_cell_contents = list()
        # list of injections (and their associated data)
        self.injections = list()
        # time at end of filtering period
        # cell temperature
        self.name = experiment_name
        # Check to make sure we can access the file.
        if not os.access(data_filename, os.R_OK):
            # BUG FIX: raising a bare string is a TypeError in Python 3;
            # raise a real exception (consistent with ExperimentYaml).
            raise IOError("The file '%s' cannot be opened." % data_filename)
        # Open the file and read its contents; the context manager closes
        # the handle even on error.
        with open(data_filename, 'r') as infile:
            lines = infile.readlines()
        # Check the header to make sure it is a VP-ITC text-formatted .itc
        # file.
        if lines[0][0:4] != '$ITC':
            # BUG FIX: was a string raise (TypeError under Python 3).
            raise ValueError("File '%s' doesn't appear to be a Microcal VP-ITC data file." % data_filename)
        # Store the datafile filename.
        self.data_filename = data_filename
        # Extract and store data about the experiment (fixed header layout).
        self.number_of_injections = int(lines[1][1:].strip())
        self.target_temperature = (int(lines[3][1:].strip()) + 273.15) * ureg.kelvin  # convert from C to K
        self.equilibration_time = int(lines[4][1:].strip()) * ureg.second
        self.stir_rate = int(lines[5][1:].strip()) * ureg.revolutions_per_minute
        self.reference_power = float(lines[6][1:].strip()) * ureg.microcalorie / ureg.second
        # Extract and store metadata about injections ('$'-prefixed lines).
        injection_number = 0
        for line in lines[10:]:
            if line[0] == '$':
                # Increment injection counter.
                injection_number += 1
                # Read data about injection.
                (injection_volume,
                 injection_duration,
                 spacing,
                 filter_period) = line[1:].strip().split(",")
                # Extract data for injection and apply appropriate unit
                # conversions.
                injectiondict = dict()
                injectiondict['number'] = injection_number
                injectiondict['volume'] = float(injection_volume) * ureg.microliter
                injectiondict['duration'] = float(injection_duration) * ureg.second
                # time between beginning of injection and beginning of next injection
                injectiondict['spacing'] = float(spacing) * ureg.second
                # time over which data channel is averaged to produce a single measurement
                injectiondict['filter_period'] = float(filter_period) * ureg.second
                self.injections.append(Injection(**injectiondict))
            else:
                break
        # Store additional data about experiment.
        parsecline = 11 + self.number_of_injections
        # supposed concentration of compound in syringe
        self.syringe_concentration = {'ligand': float(lines[parsecline][1:].strip()) * ureg.millimole / ureg.liter}
        for inj in self.injections:
            # TODO add support for multiple components
            # NOTE(review): Injection.contents presumably records the total
            # titrant concentration carried by this injection -- confirm.
            inj.contents(sum(self.syringe_concentration.values()))
        # supposed concentration of receptor in cell
        self.cell_concentration = {'macromolecule': float(lines[parsecline + 1][1:].strip()) * ureg.millimole / ureg.liter}
        self.cell_volume = float(lines[parsecline + 2][1:].strip()) * ureg.milliliter  # cell volume
        self.injection_tick = [0]
        # Allocate storage for power measurements.
        self.time = list()
        self.heat = list()
        self.temperature = list()
        # Extract lines containing heat measurements (everything from the
        # '@0' marker onwards).
        for (index, line) in enumerate(lines):
            if line[:2] == '@0':
                break
        measurement_lines = lines[index:]
        # Count number of power measurements ('@' lines are injection markers).
        nmeasurements = 0
        for line in measurement_lines:
            if line[0] != '@':
                nmeasurements += 1
        logger.info("There are %d power measurements." % nmeasurements)
        # Store data about measured heat liberated during each injection.
        # time at end of filtering period (s)
        self.filter_period_end_time = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.second)
        # "differential" power applied to sample cell (ucal/s)
        self.differential_power = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.microcalorie / ureg.second)
        # cell temperature (K)
        self.cell_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
        # adiabatic jacket temperature (K)
        self.jacket_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
        # Process data.
        # TODO this is a mess, need to clean up and do proper input
        # verification
        nmeasurements = 0
        injection_labels = list()
        for (index, line) in enumerate(measurement_lines):
            if line[0] == '@':
                injection_labels.append(nmeasurements)
            else:
                # Extract data for power measurement.
                # TODO: Auto-detect file format?
                # The three supported CSV layouts differ only in column
                # count, so try the widest first; a wrong count raises
                # ValueError from the tuple unpacking.
                jacket_temperature = 0.0
                try:
                    (time,
                     power,
                     temperature,
                     a,
                     jacket_temperature,
                     c,
                     d,
                     e,
                     f) = line.strip().split(",")  # Berkeley Auto iTC-200
                except ValueError:
                    try:
                        # works with Shoichet lab VP-ITC .itc files---what are other readings (a,b,c,d)?
                        (time,
                         power,
                         temperature,
                         a,
                         jacket_temperature,
                         c,
                         d) = line.strip().split(",")
                        # b looks like adiabatic jacket temperature (~1 degree C below sample temperature)
                    except ValueError:
                        # works with David Minh's VP-ITC .itc files
                        (time, power, temperature) = line.strip().split(",")
                # Store data about this measurement.
                self.filter_period_end_time[nmeasurements] = float(time) * ureg.second
                self.differential_power[nmeasurements] = float(power) * ureg.microcalorie / ureg.second
                self.cell_temperature[nmeasurements] = (float(temperature) + 273.15) * ureg.kelvin
                self.jacket_temperature[nmeasurements] = (float(jacket_temperature) + 273.15) * ureg.kelvin
                nmeasurements += 1
        # number of injections read, not including @0
        number_of_injections_read = len(injection_labels) - 1
        # Perform a self-consistency check on the data to make sure all injections are accounted for.
        if number_of_injections_read != self.number_of_injections:
            logger.warning("Number of injections read (%d) is not equal to number of injections declared (%d)." % (number_of_injections_read, self.number_of_injections) +
                           "This is usually a sign that the experimental run was terminated prematurely." +
                           "The analysis will not include the final %d injections declared." % (self.number_of_injections - number_of_injections_read))
            # Remove extra injections.
            self.injections = self.injections[0:number_of_injections_read]
            self.number_of_injections = number_of_injections_read
        logger.debug("self.injections has %d elements" % (len(self.injections)))
        # Annotate list of injections with their [first_index, last_index]
        # measurement spans.
        for injection in self.injections:
            injection_number = injection.number
            logger.debug("%5d %8d" % (injection_number, injection_labels[injection_number]))
            injection.first_index = injection_labels[injection_number]
            if injection_number < len(injection_labels) - 1:
                injection.last_index = injection_labels[
                    injection_number + 1] - 1
            else:
                injection.last_index = nmeasurements - 1
        return

    def write_power(self, filename):
        """
        DEBUG: Write the raw power/temperature time series to *filename*.
        """
        with open(filename, 'w') as outfile:
            outfile.write("%%%7s %16s %16s\n" % ('time (s)', 'heat (ucal/s)', 'temperature (K)'))
            for index in range(len(self.filter_period_end_time)):
                outfile.write("%8.1f %16.8f %16.8f\n" % (self.filter_period_end_time[index] / ureg.second,
                                                         self.differential_power[index] / (ureg.microcalorie / ureg.second),
                                                         self.cell_temperature[index] / ureg.kelvin
                                                         )
                              )
        return

    @staticmethod
    def _plot_confidence_interval(axes, full_x, sigma, y_pred):
        """Shade the 95% confidence band (+/- 1.96 sigma) around y_pred."""
        axes.fill(numpy.concatenate([full_x, full_x[::-1]]),
                  numpy.concatenate([y_pred - 1.9600 * sigma,
                                     (y_pred + 1.9600 * sigma)[::-1]
                                     ]),
                  alpha=.7, fc='black', ec='None', label='95% confidence interval')

    def _plot_gaussian_baseline(self, full_x, full_y, sigma, x, y, y_pred):
        """Plot raw data, fitted points and the GP baseline prediction to <name>-baseline.png."""
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        figure = Figure()
        canvas = FigureCanvas(figure)
        # NOTE(review): 'axisbg' was renamed 'facecolor' and removed in
        # matplotlib >= 2.0 -- confirm the pinned matplotlib version.
        axes = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
        # Adds a 95% confidence interval to the plot
        ExperimentMicroCal._plot_confidence_interval(axes, full_x, sigma, y_pred)
        # Entire set of data
        axes.plot(full_x, full_y, 'o', markersize=2, lw=1, color='deepskyblue', alpha=.5, label='Raw data')
        # Points for fit
        axes.plot(x, y, 'o', color='crimson', markersize=2, alpha=.8, label='Fitted data')
        # Prediction
        axes.plot(full_x, y_pred, 'o', markersize=1, mec='w', mew=1, color='k', alpha=.5, label='Predicted baseline')
        # Plot injection time markers.
        [ymin, ymax] = axes.get_ybound()
        for injection in self.injections:
            # timepoint at start of syringe injection
            last_index = injection.first_index
            t = self.filter_period_end_time[last_index] / ureg.second
            axes.plot([t, t], [ymin, ymax], '-', color='crimson')
        # Adjust axis to zoom in on baseline.
        ymax = self.baseline_power.max() / (ureg.microcalorie / ureg.second)
        ymin = self.baseline_power.min() / (ureg.microcalorie / ureg.second)
        width = ymax - ymin
        ymax += width / 2
        ymin -= width / 2
        axes.set_ybound(ymin, ymax)
        axes.set_xlabel('time (s)')
        axes.set_ylabel(r'differential power ($\mu$cal / s)')
        axes.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), ncol=4, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
        axes.set_title(self.data_filename)
        canvas.print_figure(self.name + '-baseline.png', dpi=500)

    def _plot_baseline_subtracted(self, x, y, raw=True, baseline=True):
        """Plot the baseline-subtracted data to <name>-subtracted.png."""
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        figure = Figure()
        canvas = FigureCanvas(figure)
        # NOTE(review): see _plot_gaussian_baseline about 'axisbg'.
        axes1 = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
        # Points for fit
        axes1.plot(x, y, 'o', color='deepskyblue', markersize=2, alpha=1, label='Baseline-subtracted data')
        axes1.set_xlabel('time (s)')
        axes1.set_ylabel(r' corr. differential power ($\mu$cal / s)')
        axes1.legend(loc='upper center', bbox_to_anchor=(0.2, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
        if raw:
            axes2 = axes1.twinx()
            axes2.plot(x, self.differential_power, 'o', color='gray', markersize=2, alpha=.3, label='Raw data')
            axes2.set_ylabel(r'raw differential power ($\mu$cal / s)')
            axes2.legend(loc='upper center', bbox_to_anchor=(0.8, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
            if baseline:
                axes2.plot(x, self.baseline_power, '-', color='black', alpha=.3, label='baseline')
        axes1.set_title(self.data_filename)
        canvas.print_figure(self.name + '-subtracted.png', dpi=500)

    def _retrieve_fit_indices(self, frac):
        """Form list of data to fit: everything before the first injection,
        plus the last *frac* fraction of each injection period.
        """
        x = list()
        y = list()
        fit_indices = list()
        # Add data prior to first injection
        for index in range(0, self.injections[0].first_index):
            x.append(self.filter_period_end_time[index] / ureg.second)
            y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
            fit_indices.append(index)
        # Add last x% of each injection.
        for injection in self.injections:
            start_index = injection.first_index
            end_index = injection.last_index + 1
            start_index = end_index - int((end_index - start_index) * frac)
            for index in range(start_index, end_index):
                x.append(self.filter_period_end_time[index] / ureg.second)
                y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
                fit_indices.append(index)
        x = numpy.array(x)
        y = numpy.array(y)
        fit_indices = numpy.array(fit_indices)
        return fit_indices, x, y

    def fit_gaussian_process_baseline(self, fit_fraction=0.2, theta0=5.0, nugget=1.0, plot=True):
        """
        Gaussian Process fit of baseline.
        fit_fraction : float, default 0.2
            fraction of baseline to use for fitting the GP.
        theta0 : float 5.0, default 5.0
            The parameters in the autocorrelation model.
        nugget : float, default 1.0
            Introduce a nugget effect to allow smooth predictions from noisy data.
        plot : bool, default True
            Generate plots of the baseline fit
        :return:
        :rtype:
        """
        from sklearn import gaussian_process
        # Retrieve a reduced set of data
        # (data up until first injection and x percent before every injection)
        fit_indices, x, y = self._retrieve_fit_indices(fit_fraction)
        # sklearn requires a 2d array, so make it pseudo 2d
        full_x = numpy.atleast_2d(self.filter_period_end_time).T
        x = numpy.atleast_2d(x).T
        full_y = numpy.array(self.differential_power).T
        y = numpy.array(y).T
        # TODO look into GaussianProcessRegressor http://bit.ly/2kpUs0b
        # current API will be deprecated as of scikit learn 0.8
        gp = gaussian_process.GaussianProcess(regr='quadratic',
                                              corr='squared_exponential',
                                              theta0=theta0,
                                              nugget=nugget,
                                              random_start=100)
        # Fit only based on the reduced set of the data
        gp.fit(x, y)
        y_pred, mean_squared_error = gp.predict(full_x, eval_MSE=True)
        sigma = numpy.sqrt(mean_squared_error)
        self.baseline_power = Quantity(y_pred, 'microcalories per second')
        self.baseline_fit_data = {'x': full_x, 'y': y_pred, 'indices': fit_indices}
        self.baseline_subtracted = self.differential_power - self.baseline_power
        if plot:
            self._plot_gaussian_baseline(full_x, full_y, sigma, x, y, y_pred)
            self._plot_baseline_subtracted(full_x, self.baseline_subtracted)

    def integrate_heat(self):
        """
        Compute the heat evolved from each injection from differental power timeseries data.
        """
        # Integrate heat produced by each injection.
        for injection in self.injections:
            # determine initial and final samples for injection i
            # index of timepoint for first filtered differential power measurement
            first_index = injection.first_index
            # index of timepoint for last filtered differential power measurement
            last_index = injection.last_index
            # Determine excess energy input into sample cell (with respect to reference cell) throughout this injection and measurement period.
            excess_energy_input = injection.filter_period * (
                self.differential_power[
                    first_index:(last_index + 1)] - self.baseline_power[
                    first_index:(last_index + 1)]).sum()
            logger.debug("injection %d, filter period %f s, integrating sample %d to %d" % (
                injection.number,
                injection.filter_period / ureg.second,
                first_index,
                last_index))
            # Determine total heat evolved (sign flipped: energy put in by
            # the instrument compensates heat released by the reaction).
            evolved_heat = - excess_energy_input
            # Store heat evolved from this injection.
            injection.evolved_heat = evolved_heat
        return
class ExperimentYaml(BaseExperiment):
    """ITC experiment loaded from a YAML description of injection heats and volumes."""

    @staticmethod
    def _parse_yaml(yaml_filename):
        """Open the yaml file and return its parsed contents."""
        import yaml
        with open(yaml_filename, 'r') as infile:
            # Experiment parameters.
            # BUG FIX: yaml.load without an explicit Loader is deprecated in
            # PyYAML >= 5 and can construct arbitrary Python objects from the
            # input; safe_load restricts parsing to plain data.
            yaml_input = yaml.safe_load(infile)
        # (the 'with' block closes the file; the old explicit close() was redundant)
        return yaml_input

    def __init__(self, yaml_filename, experiment_name, instrument):
        """
        Initialize an experiment from a YAML-formatted description.
        ARGUMENTS
        yaml_filename (String) - the filename of the YAML file to initialize the experiment from
        TODO
        * Add support for other formats of datafiles (XML, etc.).
        """
        # Initialize.
        super(ExperimentYaml, self).__init__(yaml_filename, experiment_name, instrument)
        # the source filename from which data is read
        # concentrations of various species in syringe
        self.syringe_contents = dict()
        self.syringe_concentration = dict()
        # concentrations of various species in sample cell
        self.sample_cell_contents = dict()
        self.cell_concentration = dict()
        # list of injections (and their associated data)
        self.injections = list()
        # time at end of filtering period
        self.name = experiment_name
        # Store the datafile filename.
        self.data_filename = yaml_filename
        # Check to make sure we can access the file.
        if not os.access(yaml_filename, os.R_OK):
            raise IOError("The file '%s' cannot be opened." % yaml_filename)
        yaml_input = self._parse_yaml(yaml_filename)
        # TODO more preliminary dict entry validations
        if len(yaml_input['injection_heats']) != len(yaml_input['injection_volumes']):
            raise ValueError('Mismatch between number of heats and volumes per injection in %s.' % yaml_filename)
        # Extract and store data about the experiment.
        self.number_of_injections = len(yaml_input['injection_heats'])
        self.temperature = Quantity(yaml_input['temperature'],
                                    yaml_input['temperature_unit'])
        # Store the stated syringe concentration(s).
        for key, value in yaml_input['syringe_concentrations'].items():
            self.syringe_concentration[key] = Quantity(value,
                                                       yaml_input['concentration_unit']).to('millimole per liter')
        # Store the stated cell concentration(s).
        for key, value in yaml_input['sample_cell_concentrations'].items():
            self.cell_concentration[key] = Quantity(value,
                                                    yaml_input['concentration_unit']).to('millimole per liter')
        # Extract and store metadata about injections.
        for index, (heat, volume) in enumerate(zip(yaml_input['injection_heats'], yaml_input['injection_volumes']), start=1):
            # Extract data for injection and apply appropriate unit conversions.
            # Entering 0.0 for any values not in the yaml.
            # TODO some values are set in integrate_heat functions, but we
            # currently ignore all but the heat
            injectiondict = dict()
            injectiondict['number'] = index
            injectiondict['volume'] = Quantity(volume, yaml_input['volume_unit'])
            injectiondict['duration'] = 0.0 * ureg.second
            # time between beginning of injection and beginning of next
            # injection
            injectiondict['spacing'] = 0.0 * ureg.second
            # time over which data channel is averaged to produce a single
            # measurement
            injectiondict['filter_period'] = 0.0 * ureg.second
            # Possible input includes heat / moles of injectant, or raw heat
            injectiondict['titrant_amount'] = sum(
                self.syringe_concentration.values()) * Quantity(volume, yaml_input['volume_unit'])
            try:
                injectiondict['evolved_heat'] = Quantity(heat, yaml_input['heat_unit']).to('microcalorie')
            except DimensionalityError:
                # The yaml stored heat per mole of injectant; convert to raw
                # heat by multiplying with the moles injected.
                # TODO This is probably only really correct for one syringe component
                evolved_heat = Quantity(heat, yaml_input['heat_unit']) * (Quantity(volume, yaml_input['volume_unit']) * sum(self.syringe_concentration.values()))
                injectiondict['evolved_heat'] = evolved_heat.to('microcalorie')
            # Store injection.
            self.injections.append(Injection(**injectiondict))
        # Mirror the per-injection data into flat Quantity arrays.
        self.observed_injection_heats = Quantity(numpy.zeros(len(self.injections)), 'microcalorie')
        self.injection_volumes = Quantity(numpy.zeros(len(self.injections)), 'milliliter')
        for index, injection in enumerate(self.injections):
            self.observed_injection_heats[index] = injection.evolved_heat
            self.injection_volumes[index] = injection.volume
        return
class ExperimentOrigin(BaseExperiment):
    """Placeholder for experiments backed by Origin-exported data; not implemented yet."""
    pass
|
choderalab/bayesian-itc
|
bayesitc/experiments.py
|
Python
|
gpl-3.0
| 36,645
|
[
"Gaussian"
] |
bb4af474d42086652228b661ccc71a0f63a23e755d467e5a4929b0cdc179dc3f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
from statusthing import StatusThing, ColorRGBA
try:
    import colorama

    def colorize(text, fg=None, bg=None):
        """Wrap *text* in ANSI foreground/background escapes via colorama.

        Unknown or None colors contribute no escape codes, so the text is
        returned unchanged in that case.
        """
        try:
            cfg = colorama.ansi.Fore.__dict__[fg.upper()] if fg is not None else ''
            rfg = colorama.ansi.Fore.__dict__['RESET'] if fg is not None else ''
        except (KeyError, AttributeError):
            # unknown color name or non-string fg
            cfg = ''
            rfg = ''
        try:
            cbg = colorama.ansi.Back.__dict__[bg.upper()] if bg is not None else ''
            rbg = colorama.ansi.Back.__dict__['RESET'] if bg is not None else ''
        except (KeyError, AttributeError):
            cbg = ''
            # BUG FIX: this assigned 'rft' instead of 'rbg', leaving rbg
            # undefined and raising NameError on the return below whenever
            # the background lookup failed.
            rbg = ''
        return cfg + cbg + text + rfg + rbg
except ImportError:
    def colorize(text, fg=None, bg=None):
        """Fallback when colorama is unavailable: return text unchanged."""
        return text
class StatusThingDemo(StatusThing):
    """Scripted demo driving a StatusThing menubar item: shapes, colors,
    text/emoji and animations, narrated via print() to the terminal."""

    def __fini__(self):
        # NOTE(review): __fini__ is not a recognized Python special method;
        # presumably called by the StatusThing teardown machinery -- confirm.
        self.done()
        print("Be seeing you space cowboy!")
        print("EOF")

    @property
    def roygbiv(self):
        # Lazily-built list of (name, color) pairs covering the rainbow;
        # indigo/violet use explicit hex values since they lack named colors.
        try:
            return self._roygbiv
        except AttributeError:
            self._roygbiv = [('red','red'),
                             ('orange','orange'),
                             ('yellow','yellow'),
                             ('green','green'),
                             ('blue','blue'),
                             ('indigo',ColorRGBA.colorForHexstring('4D0082')),
                             ('violet',ColorRGBA.colorForHexstring('EE82EE'))]
        return self._roygbiv

    def hidden(self,fg=None,bg=None,txt=None):
        # Set the hidden flag of all three layers at once, then push the
        # change and clear pending state.
        self.foreground.hidden = fg
        self.background.hidden = bg
        self.text.hidden = txt
        self.commit()
        self.clear()

    # rework in to an array of sections
    #
    # encapsulate sections into DemoSection objects
    # call in sequence
    #
    def start(self,aBeat=1.75,sections=None):
        # Run the whole demo; aBeat is the base pacing in seconds between
        # visual changes. 'sections' is currently unused (see TODO above).
        self.reset()
        self.shape = 'circle'
        self.foreground.color = 'black'
        self.foreground.lineWidth = 2
        self.hidden(fg=False,bg=True,txt=True)
        sleep(aBeat)
        print(colorize("https://github.com/JnyJny/StatusThing",fg='white',bg='blue'))
        sleep(aBeat)
        self.foreground.blink = True
        self.commit()
        print("Hey, here I am. Up on the status bar.")
        sleep(aBeat*3)
        # Switch from blinking outline to a text greeting.
        self.foreground.blink = False
        self.foreground.hidden = True
        self.text.hidden = False
        self.text.font = 'Courier Bold'
        self.text.fontSize = 14
        self.text.foreground = "black"
        self.text.string = "Hi"
        print("I am stupid excited to see you!")
        self.commit()
        sleep(aBeat)
        self.text.enbiggen = True
        self.text.spin = 'fast'
        self.commit()
        sleep(aBeat)
        self.text.enbiggen = False
        self.text.spin = False
        self.text.hidden = True
        self.foreground.hidden = False
        self.commit()
        print("I am StatusThing and I know lots of tricks.")
        sleep(aBeat)
        print("For instance, I can change %s, %d and counting" % ( colorize("shape",'green'),
                                                                   len(self.shapes)))
        # Cycle through every available shape, speeding up as we go.
        shapes = list(self.shapes)
        try:
            shapes.remove('None')
        except:
            pass
        shapes.sort()
        for idx,shape in enumerate(shapes):
            print(shape.capitalize())
            self.shape = shape
            self.commit()
            pause = aBeat/2. if idx < 7 else (aBeat * (1./(idx)))
            sleep(pause)
        sleep(aBeat)
        print("\nAs you can see, I am quite flexible.")
        self.shape = 'circle'
        self.commit()
        sleep(aBeat*2)
        print("I also have %s intelligence!" % (colorize("emotional",'cyan','red')))
        sleep(aBeat)
        # Rainbow sweep through the background fill colors.
        self.foreground.hidden = True
        self.background.hidden = False
        self.background.fill = 'clear'
        self.commit()
        for (name,color) in self.roygbiv:
            print('\t%s' % name.capitalize())
            self.background.fill = color
            self.commit()
            sleep(aBeat/2)
        self.foreground.fill = 'clear'
        self.foreground.hidden = False
        self.background.hidden = True
        self.commit()
        print("I know plenty of other colors, but I'm sure you are getting the idea.")
        sleep(aBeat)
        print("Here's another fun thing I can do...")
        sleep(aBeat)
        # Text layer demo: plain characters first, then emoji/unicode.
        self.hidden(bg=True,fg=True,txt=False)
        self.text.foreground = 'black'
        self.text.fontSize = 18
        self.text.string = '?'
        self.commit()
        sleep(aBeat)
        print("Display text!")
        self.text.string = '!'
        self.commit()
        sleep(aBeat)
        print("Unfortunately I only have room for one or two characters...")
        sleep(aBeat)
        self.text.font = 'Apple Color Emoji'
        self.text.fontSize = 22
        print("So use unicode characters and make them %s " % (colorize("count!",'green')))
        for idx,emoji in enumerate([u'😍',u'👻',u'🎵',u'🎥',u'📫',u'💣',u'➡️',u'⬇️',u'⬅️',u'⬆️',
                                    u'💯',u'🔜',u'♨️',u'♻️',u'🌀',u'⎋',u'⌘',u'⌫',u'☎︎',u'℗',
                                    u'Ω',u'⨁',u'⨂',u'∳',u'✅',u'🚀' ]):
            self.text.string = emoji
            self.commit()
            sleep(aBeat/(idx+1))
        self.text.spin = True
        self.commit()
        self.text.string = u'018f'
        self.text.font = 'Courier Bold'
        self.commit()
        sleep(aBeat/4)
        self.text.spin = False
        self.commit()
        print("Whoever thought up Unicode and emoji was %s" % (colorize('wicked smart!','blue','white')))
        sleep(aBeat*3)
        print('I saved my best trick for last.')
        sleep(aBeat)
        print('I can be very %s...' % (colorize('animated','red','yellow')))
        sleep(aBeat/2)
        # Walk through each foreground animation (spincw/flipx excluded)
        # one at a time: enable, show, disable.
        self.hidden(fg=False,bg=True,txt=True)
        self.shape = 'rounded square'
        animations = self.foreground.animations
        animations.remove('spincw')
        animations.remove('flipx')
        for idx,animation in enumerate(animations):
            setattr(self.foreground,animation,True)
            self.commit()
            print('\t%s' % animation.capitalize())
            sleep(aBeat*1.50)
            setattr(self.foreground,animation,False)
            self.commit()
        print("Each of my three layers are individually animatiable.")
        sleep(aBeat)
        print("To be honest %s I have a plethora of options." % colorize('Jefe,','yellow'))
        # Grand finale: combine animations on multiple layers at once.
        self.hidden(bg=False,fg=False,txt=True)
        self.background.throb = True
        self.background.fill = ColorRGBA(1,0,0,1)
        self.foreground.lineWidth = 2
        self.foreground.flipy = True
        self.commit()
        sleep(aBeat*2)
        self.foreground.spinccw = True
        self.commit()
        sleep(aBeat*2)
        self.foreground.spinccw = False
        self.foreground.spincw = True
        self.background.fill = 'green'
        self.commit()
        self.shape = 'pentagram'
        self.background.enbiggen = True
        self.commit()
        sleep(aBeat*2)
        print("That was fun, but I'll dial it back a notch or two.")
        # Wind down: reset all animations and return to the plain circle.
        self.shape = "circle"
        self.foreground.spincw = False
        self.background.throb = False
        self.foreground.flipy = False
        self.background.enbiggen = False
        self.background.stretch = False
        self.foreground.lineWidth = 2
        self.commit()
        sleep(aBeat)
        print("I am StatusThing, and I am...")
        sleep(aBeat/2)
        print(' - JSON configurable')
        sleep(aBeat/2)
        print(' - Network addressable')
        sleep(aBeat/2)
        print(' - Bonjour enabled')
        sleep(aBeat/2)
        print(' - Animated')
        sleep(aBeat/2)
        print(' - %s' %(colorize('Tons of Fun!!','green')))
        sleep(aBeat)
        print('Visit my github page and download me today!')
        sleep(aBeat)
        print('Give a visual voice to anything you can think of.')
        sleep(aBeat)
        print('Thanks for watching!')
        sleep(aBeat)
        print(colorize("https://github.com/JnyJny/StatusThing",fg='white',bg='blue'))
        sleep(aBeat*3)
        print("PS. This demo was written using python bindings that build the")
        sleep(aBeat/2)
        print(" JSON dictionaries that describe the changes you saw in the video.")
        sleep(aBeat/2)
        print(" The demo and the bindings are also available on github.")
        sleep(aBeat)
        self.hidden(bg=True,fg=False,txt=True)
        self.commit()
if __name__ == '__main__':
    # Run the scripted demo, then tear the status item down.
    thing = StatusThingDemo()
    thing.start()
    thing.deactivate()
|
JnyJny/StatusThing
|
Bindings/Python/Demo.py
|
Python
|
mit
| 8,856
|
[
"VisIt"
] |
6133bafc09823d7ee8ca33dc1c635afe43c915e2de32ccbb59d8c376d3b5f8fb
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
    """Attribute bag for optional regression results; str() returns a
    pre-built summary string."""

    def __str__(self):
        # _str is attached externally by the routine that populates the store.
        return self._str  # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
             store=False, regresults=False):
    """
    Augmented Dickey-Fuller unit root test

    The Augmented Dickey-Fuller test can be used to test for a unit root in a
    univariate process in the presence of serial correlation.

    Parameters
    ----------
    x : array_like, 1d
        data series
    maxlag : int
        Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
    regression : {'c','ct','ctt','nc'}
        Constant and trend order to include in regression

        * 'c' : constant only (default)
        * 'ct' : constant and trend
        * 'ctt' : constant, and linear and quadratic trend
        * 'nc' : no constant, no trend
    autolag : {'AIC', 'BIC', 't-stat', None}
        * if None, then maxlag lags are used
        * if 'AIC' (default) or 'BIC', then the number of lags is chosen
          to minimize the corresponding information criterion
        * 't-stat' based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant
          using a 5%-sized test
    store : bool
        If True, then a result instance is returned additionally to
        the adf statistic. Default is False
    regresults : bool, optional
        If True, the full regression results are returned. Default is False

    Returns
    -------
    adf : float
        Test statistic
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
    usedlag : int
        Number of lags used
    nobs : int
        Number of observations used for the ADF regression and calculation of
        the critical values
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels. Based on MacKinnon (2010)
    icbest : float
        The maximized information criterion if autolag is not None.
    resstore : ResultStore, optional
        A dummy class with results attached as attributes

    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
    root, with the alternative that there is no unit root. If the pvalue is
    above a critical size, then we cannot reject that there is a unit root.

    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
    to significant, then the critical values should be used to judge whether
    to reject the null.

    The autolag option and maxlag for it are described in Greene.

    Examples
    --------
    See example notebook

    References
    ----------
    .. [1] W. Green.  "Econometric Analysis," 5th ed., Pearson, 2003.

    .. [2] Hamilton, J.D.  "Time Series Analysis".  Princeton, 1994.

    .. [3] MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
        unit-root and cointegration tests.  `Journal of Business and Economic
        Statistics` 12, 167-76.

    .. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."  Queen's
        University, Dept of Economics, Working Papers.  Available at
        http://ideas.repec.org/p/qed/wpaper/1227.html
    """
    if regresults:
        store = True

    trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
    # allow integer trend orders as an alias for the string codes
    # (py2 `long` folded into `int` so this also works on Python 3)
    if regression is None or isinstance(regression, int):
        regression = trenddict[regression]
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        # BUG FIX: `%` was previously applied to the ValueError instance
        # (outside the call), which raised a TypeError instead
        raise ValueError("regression option %s not understood" % regression)
    x = np.asarray(x)
    nobs = x.shape[0]

    if maxlag is None:
        # from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))

    xdiff = np.diff(x)
    xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
    nobs = xdall.shape[0]  # pylint: disable=E1103

    xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]

    if store:
        resstore = ResultsStore()
    if autolag:
        if regression != 'nc':
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1  # 1 for level  # pylint: disable=E1103
        # search for lag length with smallest information criteria
        # Note: use the same number of observations to have comparable IC
        # aic and bic: smaller is better
        if not regresults:
            icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
                                       maxlag, autolag)
        else:
            icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
                                              maxlag, autolag,
                                              regresults=regresults)
            resstore.autolag_results = alres
        bestlag -= startlag  # convert to lag not column index

        # rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
        nobs = xdall.shape[0]  # pylint: disable=E1103
        xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None
    if regression != 'nc':
        resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
                                        regression)).fit()
    else:
        resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()

    adfstat = resols.tvalues[0]
#    adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are integrated
    # for orders higher than I(1), ie., not ADF but cointegration tests.

    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
                  "10%" : critvalues[2]}
    if store:
        resstore.resols = resols
        resstore.maxlag = maxlag
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = ("The coefficient on the lagged level equals 1 - "
                       "unit root")
        resstore.HA = "The coefficient on the lagged level < 1 - stationary"
        resstore.icbest = icbest
        resstore._str = 'Augmented Dickey-Fuller Test Results'
        return adfstat, pvalue, critvalues, resstore
    else:
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
    """
    Autocovariance for 1D

    Parameters
    ----------
    x : array
        Time series data. Must be 1d.
    unbiased : bool
        If True, then denominators is n-k, otherwise n
    demean : bool
        If True, then subtract the mean x from each element of x
    fft : bool
        If True, use FFT convolution.  This method should be preferred
        for long time series.
    missing : str
        A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
        are to be treated.

    Returns
    -------
    acovf : array
        autocovariance function

    References
    -----------
    .. [1] Parzen, E., 1963. On spectral analysis with missing observations
       and amplitude modulation. Sankhya: The Indian Journal of
       Statistics, Series A, pp.383-392.
    """
    x = np.squeeze(np.asarray(x))
    if x.ndim > 1:
        raise ValueError("x must be 1d. Got %d dims." % x.ndim)

    missing = missing.lower()
    if missing not in ['none', 'raise', 'conservative', 'drop']:
        raise ValueError("missing option %s not understood" % missing)
    if missing == 'none':
        deal_with_masked = False
    else:
        # only pay for NaN handling when NaNs are actually present
        deal_with_masked = has_missing(x)
    if deal_with_masked:
        if missing == 'raise':
            raise MissingDataError("NaNs were encountered in the data")
        notmask_bool = ~np.isnan(x)  # bool
        if missing == 'conservative':
            # NOTE: temporarily zeros the NaN entries of the caller's array
            # in place; restored to NaN at the end of this function
            x[~notmask_bool] = 0
        else:  # 'drop'
            x = x[notmask_bool]  # copies non-missing
        notmask_int = notmask_bool.astype(int)  # int

    if demean and deal_with_masked:
        # whether 'drop' or 'conservative': mean over observed entries only
        xo = x - x.sum()/notmask_int.sum()
        if missing=='conservative':
            xo[~notmask_bool] = 0
    elif demean:
        xo = x - x.mean()
    else:
        xo = x

    n = len(x)
    if unbiased and deal_with_masked and missing=='conservative':
        # per-lag count of pairwise non-missing observations
        d = np.correlate(notmask_int, notmask_int, 'full')
    elif unbiased:
        # triangular denominators n-k for each lag k
        xi = np.arange(1, n + 1)
        d = np.hstack((xi, xi[:-1][::-1]))
    elif deal_with_masked:
        # biased and NaNs given and ('drop' or 'conservative')
        d = notmask_int.sum() * np.ones(2*n-1)
    else:
        # biased and no NaNs or missing=='none'
        d = n * np.ones(2 * n - 1)

    if fft:
        # compute via FFT of the (possibly masked) demeaned series;
        # pad to a 5-smooth length for FFT efficiency
        nobs = len(xo)
        n = _next_regular(2 * nobs + 1)
        Frf = np.fft.fft(xo, n=n)
        acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
        acov = acov.real
    else:
        acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]

    if deal_with_masked and missing=='conservative':
        # restore data for the user
        x[~notmask_bool] = np.nan

    return acov
def q_stat(x, nobs, type="ljungbox"):
    """
    Returns the Ljung-Box Q statistic

    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients.  Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (ie., not just the length
        of the autocorrelation function results.
    type : str
        Statistic to compute; only "ljungbox" is supported.

    Returns
    -------
    q-stat : array
        Ljung-Box Q-statistic for autocorrelation parameters
    p-value : array
        P-value of the Q statistic

    Raises
    ------
    ValueError
        If `type` is not "ljungbox".

    Notes
    ------
    Written to be used with acf.
    """
    x = np.asarray(x)
    if type != "ljungbox":
        # previously an unknown `type` fell off the end of the function
        # (implicitly returning None); fail loudly instead
        raise ValueError("Statistic type %s not understood" % type)
    # cumulative Ljung-Box statistic over lags 1..len(x)
    ret = (nobs * (nobs + 2) *
           np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
    # p-values from the chi-square survival function, df = lag
    chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
    return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
        missing='none'):
    """
    Autocorrelation function for 1d arrays.

    Parameters
    ----------
    x : array
        Time series data
    unbiased : bool
        If True, then denominators for autocovariance are n-k, otherwise n
    nlags: int, optional
        Number of lags to return autocorrelation for.
    qstat : bool, optional
        If True, returns the Ljung-Box q statistic for each autocorrelation
        coefficient.  See q_stat for more information.
    fft : bool, optional
        If True, computes the ACF via FFT.
    alpha : scalar, optional
        If a number is given, the confidence intervals for the given level
        are returned, with standard deviations computed according to
        Bartlett's formula.
    missing : str, optional
        One of ['none', 'raise', 'conservative', 'drop'] specifying how NaNs
        are to be treated.

    Returns
    -------
    acf : array
        autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF (only when `alpha` is given).
    qstat : array, optional
        The Ljung-Box Q-Statistic (only when `qstat` is True).
    pvalues : array, optional
        The p-values associated with the Q-statistics (only when `qstat`
        is True).

    Notes
    -----
    The acf at lag 0 (ie., 1) is returned.  Based on np.correlate (full
    convolution); for very long series prefer fft=True.  If `unbiased` the
    autocovariance denominator is adjusted, but the resulting
    autocorrelation is not an unbiased estimator.

    References
    ----------
    .. [1] Parzen, E., 1963. On spectral analysis with missing observations
       and amplitude modulation. Sankhya: The Indian Journal of
       Statistics, Series A, pp.383-392.
    """
    nobs = len(x)  # should this shrink for missing='drop' and NaNs in x?
    autocov = acovf(x, unbiased=unbiased, demean=True, fft=fft,
                    missing=missing)
    ret = autocov[:nlags + 1] / autocov[0]

    if not (qstat or alpha):
        return ret

    confint = None
    if alpha is not None:
        # Bartlett's formula for the variance of sample autocorrelations
        varacf = np.ones(nlags + 1) / nobs
        varacf[0] = 0
        varacf[1] = 1. / nobs
        varacf[2:] *= 1 + 2 * np.cumsum(ret[1:-1] ** 2)
        interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
        confint = np.array(lzip(ret - interval, ret + interval))
        if not qstat:
            return ret, confint

    # reaching here means qstat is True; drop lag 0 for the Q statistic
    qstats, pvalues = q_stat(ret[1:], nobs=nobs)
    if alpha is not None:
        return ret, confint, qstats, pvalues
    return ret, qstats, pvalues
def pacf_yw(x, nlags=40, method='unbiased'):
    """Partial autocorrelation estimated with non-recursive yule_walker

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        largest lag for which pacf is returned
    method : 'unbiased' (default) or 'mle'
        method for the autocovariance calculations in yule walker

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, maxlag+1 elements

    Notes
    -----
    Solves a fresh Yule-Walker system for every lag, so work is duplicated
    across lags.
    """
    out = [1.]
    for lag in range(1, nlags + 1):
        # last AR coefficient of the order-`lag` fit is the PACF at `lag`
        arcoefs = yule_walker(x, lag, method=method)[0]
        out.append(arcoefs[-1])
    return np.array(out)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
    """Calculate partial autocorrelations

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned.  Lag 0 is not returned.

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, maxlag+1 elements

    Notes
    -----
    Solves a separate OLS estimation for each desired lag.
    """
    # TODO: add warnings for Yule-Walker
    # NOTE: demeaning and not using a constant gave incorrect answers?
    # JP: demeaning should have a better estimate of the constant
    # maybe we can compare small sample properties with a MonteCarlo
    xlags, x0 = lagmat(x, nlags, original='sep')
    xlags = add_constant(xlags)

    out = [1.]
    for lag in range(1, nlags + 1):
        # regress x_t on a constant and its first `lag` lags; the last
        # coefficient is the partial autocorrelation at that lag
        fit = OLS(x0[lag:], xlags[lag:, :lag + 1]).fit()
        out.append(fit.params[-1])
    return np.array(out)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
    """Partial autocorrelation estimated

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        largest lag for which pacf is returned
    method : 'ywunbiased' (default) or 'ywmle' or 'ols'
        specifies which method for the calculations to use:

        - yw or ywunbiased : yule walker with bias correction in denominator
          for acovf
        - ywm or ywmle : yule walker without bias correction
        - ols - regression of time series on lags of it and on constant
        - ld or ldunbiased : Levinson-Durbin recursion with bias correction
        - ldb or ldbiased : Levinson-Durbin recursion without bias correction
    alpha : scalar, optional
        If a number is given, the confidence intervals for the given level are
        returned. For instance if alpha=.05, 95 % confidence intervals are
        returned where the standard deviation is computed according to
        1/sqrt(len(x))

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if confint is not None.

    Raises
    ------
    ValueError
        If `method` is not one of the recognized aliases.

    Notes
    -----
    This solves yule_walker equations or ols for each desired lag
    and contains currently duplicate calculations.
    """
    if method == 'ols':
        ret = pacf_ols(x, nlags=nlags)
    elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
        ret = pacf_yw(x, nlags=nlags, method='unbiased')
    elif method in ['ywm', 'ywmle', 'yw_mle']:
        ret = pacf_yw(x, nlags=nlags, method='mle')
    # 'ldunbiased' added to match the documented spelling; the historical
    # typo 'ldunbiase' is kept for backward compatibility
    elif method in ['ld', 'ldu', 'ldunbiase', 'ldunbiased', 'ld_unbiased']:
        acv = acovf(x, unbiased=True)
        ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
        ret = ld_[2]
    # inconsistent naming with ywmle
    elif method in ['ldb', 'ldbiased', 'ld_biased']:
        acv = acovf(x, unbiased=False)
        ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
        ret = ld_[2]
    else:
        raise ValueError('method not available')

    if alpha is not None:
        # approximate standard error 1/sqrt(n), same for all lags >= 1
        varacf = 1. / len(x)
        interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
        confint = np.array(lzip(ret - interval, ret + interval))
        confint[0] = ret[0]  # fix confidence interval for lag 0 to varpacf=0
        return ret, confint
    else:
        return ret
def ccovf(x, y, unbiased=True, demean=True):
    """Cross-covariance for 1D

    Parameters
    ----------
    x, y : arrays
        time series data
    unbiased : boolean
        if True, then denominators is n-k, otherwise n
    demean : boolean
        if True, subtract the sample mean from each series first

    Returns
    -------
    ccovf : array
        cross-covariance function

    Notes
    -----
    This uses np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.
    """
    n = len(x)
    if demean:
        xd = x - x.mean()
        yd = y - y.mean()
    else:
        xd = x
        yd = y
    if unbiased:
        # triangular window of pair counts gives the n-k denominators
        ones = np.ones(n)
        denom = np.correlate(ones, ones, 'full')
    else:
        denom = n
    # keep only the non-negative lags of the full cross-correlation
    return (np.correlate(xd, yd, 'full') / denom)[n - 1:]
def ccf(x, y, unbiased=True):
    """cross-correlation function for 1d

    Parameters
    ----------
    x, y : arrays
        time series data
    unbiased : boolean
        if True, then denominators for autocovariance is n-k, otherwise n

    Returns
    -------
    ccf : array
        cross-correlation function of x and y

    Notes
    -----
    This is based np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.

    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimtor.
    """
    cross_cov = ccovf(x, y, unbiased=unbiased, demean=True)
    # normalize by the product of the two sample standard deviations
    return cross_cov / (np.std(x) * np.std(y))
def periodogram(X):
    """
    Returns the periodogram for the natural frequency of X

    Parameters
    ----------
    X : array-like
        Array for which the periodogram is desired.

    Returns
    -------
    pgram : array
        1./len(X) * np.abs(np.fft.fft(X))**2

    References
    ----------
    Brockwell and Davis.
    """
    X = np.asarray(X)
    spectrum = np.abs(np.fft.fft(X)) ** 2 / len(X)
    # zero out the DC (mean) term; implications of this are unclear
    # per the original author's note
    spectrum[0] = 0.
    return spectrum
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
    """Levinson-Durbin recursion for autoregressive processes

    Parameters
    ----------
    s : array_like
        If isacov is False, then this is the time series. If iasacov is true
        then this is interpreted as autocovariance starting with lag 0
    nlags : integer
        largest lag to include in recursion or order of the autoregressive
        process
    isacov : boolean
        flag to indicate whether the first argument, s, contains the
        autocovariances or the data series.

    Returns
    -------
    sigma_v : float
        estimate of the error variance ?
    arcoefs : ndarray
        estimate of the autoregressive coefficients
    pacf : ndarray
        partial autocorrelation function
    sigma : ndarray
        entire sigma array from intermediate result, last value is sigma_v
    phi : ndarray
        entire phi array from intermediate result, last column contains
        autoregressive coefficients for AR(nlags) with a leading 1

    Notes
    -----
    This function returns currently all results, but maybe we drop sigma and
    phi from the returns.

    If this function is called with the time series (isacov=False), then the
    sample autocovariance function is calculated with the default options
    (biased, no fft).
    """
    s = np.asarray(s)
    order = nlags  # rename compared to nitime

    if isacov:
        acov = s
    else:
        acov = acovf(s)[:order + 1]  # not tested

    phi = np.zeros((order + 1, order + 1), 'd')
    sig = np.zeros(order + 1)
    # seed the recursion with the AR(1) coefficient and innovation variance
    phi[1, 1] = acov[1] / acov[0]
    sig[1] = acov[0] - phi[1, 1] * acov[1]
    for k in range(2, order + 1):
        # reflection coefficient for order k
        numer = acov[k] - np.dot(phi[1:k, k - 1], acov[1:k][::-1])
        phi[k, k] = numer / sig[k - 1]
        for j in range(1, k):
            # update lower-order coefficients for the order-k model
            phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
        sig[k] = sig[k - 1] * (1 - phi[k, k] ** 2)

    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    pacf_ = np.diag(phi).copy()
    pacf_[0] = 1.
    return sigma_v, arcoefs, pacf_, sig, phi  # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
    """four tests for granger non causality of 2 timeseries

    all four tests give similar results
    `params_ftest` and `ssr_ftest` are equivalent based on F test which is
    identical to lmtest:grangertest in R

    Parameters
    ----------
    x : array, 2d, (nobs,2)
        data for test whether the time series in the second column Granger
        causes the time series in the first column
    maxlag : integer
        the Granger causality test results are calculated for all lags up to
        maxlag
    addconst : bool
        include a constant in the regressions (only True is implemented)
    verbose : bool
        print results if true

    Returns
    -------
    results : dictionary
        all test results, dictionary keys are the number of lags. For each
        lag the values are a tuple, with the first element a dictionary with
        teststatistic, pvalues, degrees of freedom, the second element are
        the OLS estimation results for the restricted model, the unrestricted
        model and the restriction (contrast) matrix for the parameter f_test.

    Notes
    -----
    TODO: convert to class and attach results properly

    The Null hypothesis for grangercausalitytests is that the time series in
    the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Grange causality means that past values of x2 have a
    statistically significant effect on the current value of x1, taking past
    values of x1 into account as regressors. We reject the null hypothesis
    that x2 does not Granger cause x1 if the pvalues are below a desired size
    of the test.

    The null hypothesis for all four test is that the coefficients
    corresponding to past values of the second time series are zero.

    'params_ftest', 'ssr_ftest' are based on F distribution

    'ssr_chi2test', 'lrtest' are based on chi-square distribution

    References
    ----------
    http://en.wikipedia.org/wiki/Granger_causality
    Greene: Econometric Analysis
    """
    from scipy import stats

    x = np.asarray(x)

    # need at least 3*maxlag (+ constant) rows for the joint regression below
    if x.shape[0] <= 3 * maxlag + int(addconst):
        raise ValueError("Insufficient observations. Maximum allowable "
                         "lag is {0}".format(int((x.shape[0] - int(addconst)) /
                                                 3) - 1))

    resli = {}

    for mlg in range(1, maxlag + 1):
        result = {}
        if verbose:
            print('\nGranger Causality')
            print('number of lags (no zero)', mlg)
        mxlg = mlg

        # create lagmat of both time series
        dta = lagmat2ds(x, mxlg, trim='both', dropex=1)

        #add constant
        if addconst:
            # restricted model: own lags only; joint model: both series' lags
            dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
            dtajoint = add_constant(dta[:, 1:], prepend=False)
        else:
            raise NotImplementedError('Not Implemented')
            #dtaown = dta[:, 1:mxlg]
            #dtajoint = dta[:, 1:]

        # Run ols on both models without and with lags of second variable
        res2down = OLS(dta[:, 0], dtaown).fit()
        res2djoint = OLS(dta[:, 0], dtajoint).fit()

        #print results
        #for ssr based tests see:
        #http://support.sas.com/rnd/app/examples/ets/granger/index.htm
        #the other tests are made-up

        # Granger Causality test using ssr (F statistic)
        fgc1 = ((res2down.ssr - res2djoint.ssr) /
                res2djoint.ssr / mxlg * res2djoint.df_resid)
        if verbose:
            print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
                  ' df_num=%d' % (fgc1,
                                  stats.f.sf(fgc1, mxlg,
                                             res2djoint.df_resid),
                                  res2djoint.df_resid, mxlg))
        result['ssr_ftest'] = (fgc1,
                               stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
                               res2djoint.df_resid, mxlg)

        # Granger Causality test using ssr (ch2 statistic)
        fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
        if verbose:
            print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
                  'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
        result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)

        #likelihood ratio test pvalue:
        lr = -2 * (res2down.llf - res2djoint.llf)
        if verbose:
            print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
                  (lr, stats.chi2.sf(lr, mxlg), mxlg))
        result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)

        # F test that all lag coefficients of exog are zero
        # contrast matrix selects only the second series' lag coefficients
        rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
                                   np.eye(mxlg, mxlg),
                                   np.zeros((mxlg, 1))))
        ftres = res2djoint.f_test(rconstr)
        if verbose:
            print('parameter F test:         F=%-8.4f, p=%-8.4f, df_denom=%d,'
                  ' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
                                  ftres.df_num))
        result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
                                  np.squeeze(ftres.pvalue)[()],
                                  ftres.df_denom, ftres.df_num)

        resli[mxlg] = (result, [res2down, res2djoint, rconstr])

    return resli
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
          return_results=None):
    """Test for no-cointegration of a univariate equation

    The null hypothesis is no cointegration. Variables in y0 and y1 are
    assumed to be integrated of order 1, I(1).

    This uses the augmented Engle-Granger two-step cointegration test.
    Constant or trend is included in 1st stage regression, i.e. in
    cointegrating equation.

    Parameters
    ----------
    y1 : array_like, 1d
        first element in cointegrating vector
    y2 : array_like
        remaining elements in cointegrating vector
    trend : str {'c', 'ct'}
        trend term included in regression for cointegrating equation

        * 'c' : constant
        * 'ct' : constant and linear trend
        * also available quadratic trend 'ctt', and no constant 'nc'
    method : string
        currently only 'aeg' for augmented Engle-Granger test is available.
        default might change.
    maxlag : None or int
        keyword for `adfuller`, largest or given number of lags
    autolag : string
        keyword for `adfuller`, lag selection criterion.
    return_results : bool
        for future compatibility, currently only tuple available.
        If True, then a results instance is returned. Otherwise, a tuple
        with the test outcome is returned.
        Set `return_results=False` to avoid future changes in return.

    Returns
    -------
    coint_t : float
        t-statistic of unit-root test on residuals
    pvalue : float
        MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
    crit_value : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels based on regression curve. This depends on the number of
        observations.

    Notes
    -----
    The Null hypothesis is that there is no cointegration, the alternative
    hypothesis is that there is cointegrating relationship. If the pvalue is
    small, below a critical size, then we can reject the hypothesis that there
    is no cointegrating relationship.

    P-values and critical values are obtained through regression surface
    approximation from MacKinnon 1994 and 2010.

    TODO: We could handle gaps in data by dropping rows with nans in the
    auxiliary regressions. Not implemented yet, currently assumes no nans
    and no gaps in time series.

    References
    ----------
    MacKinnon, J.G. 1994  "Approximate Asymptotic Distribution Functions for
        Unit-Root and Cointegration Tests." Journal of Business & Economics
        Statistics, 12.2, 167-76.
    MacKinnon, J.G. 2010.  "Critical Values for Cointegration Tests."
        Queen's University, Dept of Economics Working Papers 1227.
        http://ideas.repec.org/p/qed/wpaper/1227.html
    """
    trend = trend.lower()
    if trend not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("trend option %s not understood" % trend)
    y0 = np.asarray(y0)
    y1 = np.asarray(y1)
    if y1.ndim < 2:
        y1 = y1[:, None]
    nobs, k_vars = y1.shape
    k_vars += 1   # add 1 for y0

    # first-stage (cointegrating) regression of y0 on y1 plus trend terms
    if trend == 'nc':
        xx = y1
    else:
        xx = add_trend(y1, trend=trend, prepend=False)

    res_co = OLS(y0, xx).fit()

    # second stage: ADF unit-root test on the first-stage residuals;
    # skipped when the first stage is (near-)perfect, since the residuals
    # are then numerically degenerate
    if res_co.rsquared < 1 - np.sqrt(np.finfo(np.double).eps):
        res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=None,
                           regression='nc')
    else:
        import warnings
        warnings.warn("y0 and y1 are perfectly colinear.  Cointegration test "
                      "is not reliable in this case.")
        # Edge case where series are too similar
        res_adf = (0,)

    # no constant or trend, see egranger in Stata and MacKinnon
    if trend == 'nc':
        crit = [np.nan] * 3  # 2010 critical values not available
    else:
        crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
        # nobs - 1, the -1 is to match egranger in Stata, I don't know why.
        # TODO: check nobs or df = nobs - k

    pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
    return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
    """Fit an ARMA model, returning None instead of raising on failure.

    Used by `arma_order_select_ic` to sweep over (p, q) orders: a model that
    fails to estimate simply yields None (a NaN cell in the results grid)
    rather than aborting the whole sweep.

    Parameters
    ----------
    y : array-like
        Time-series data.
    order : tuple
        (p, q) order passed to the ``ARMA`` constructor.
    model_kw : dict
        Extra keyword arguments for the ``ARMA`` constructor.
    trend : str
        Trend specification passed to ``fit``.
    fit_kw : dict
        Extra keyword arguments passed to ``fit``.
    start_params : array-like, optional
        Starting parameters.  If given, no retry is attempted on failure.

    Returns
    -------
    ARMAResults or None
        Fit results, or None if estimation failed.
    """
    try:
        return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
                                                    start_params=start_params,
                                                    **fit_kw)
    except LinAlgError:
        # SVD convergence failure on badly misspecified models
        return
    except ValueError as error:
        if start_params is not None:  # don't recurse again
            # user supplied start_params only get one chance
            return
        # try a little harder, should be handled in fit really
        # NOTE(review): this condition is effectively always True as written
        # ('A or not A' shape); kept as-is pending confirmation of intent
        elif ('initial' not in error.args[0] or 'initial' in str(error)):
            start_params = [.1] * sum(order)
            if trend == 'c':
                start_params = [.1] + start_params
            return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
                                  start_params)
        else:
            return
    except Exception:
        # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # propagate instead of being silently swallowed
        return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
                         model_kw=None, fit_kw=None):
    """
    Returns information criteria for many ARMA models

    Parameters
    ----------
    y : array-like
        Time-series data
    max_ar : int
        Maximum number of AR lags to use. Default 4.
    max_ma : int
        Maximum number of MA lags to use. Default 2.
    ic : str, list
        Information criteria to report. Either a single string or a list
        of different criteria is possible.
    trend : str
        The trend to use when fitting the ARMA models.
    model_kw : dict, optional
        Keyword arguments to be passed to the ``ARMA`` model.
        Default is no extra keywords.
    fit_kw : dict, optional
        Keyword arguments to be passed to ``ARMA.fit``.
        Default is no extra keywords.

    Returns
    -------
    obj : Results object
        Each ic is an attribute with a DataFrame for the results. The AR order
        used is the row index. The ma order used is the column index. The
        minimum orders are available as ``ic_min_order``.

    Examples
    --------

    >>> from statsmodels.tsa.arima_process import arma_generate_sample
    >>> import statsmodels.api as sm
    >>> import numpy as np

    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> arparams = np.r_[1, -arparams]
    >>> maparam = np.r_[1, maparams]
    >>> nobs = 250
    >>> np.random.seed(2014)
    >>> y = arma_generate_sample(arparams, maparams, nobs)
    >>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    >>> res.aic_min_order
    >>> res.bic_min_order

    Notes
    -----
    This method can be used to tentatively identify the order of an ARMA
    process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can be,
    therefore a little slow. An implementation using approximate estimates
    will be provided in the future. In the meantime, consider passing
    {method : 'css'} to fit_kw.
    """
    from pandas import DataFrame

    # None-sentinel instead of mutable default arguments ({} shared across
    # calls); behavior is unchanged for all existing callers
    model_kw = {} if model_kw is None else model_kw
    fit_kw = {} if fit_kw is None else fit_kw

    ar_range = lrange(0, max_ar + 1)
    ma_range = lrange(0, max_ma + 1)
    if isinstance(ic, string_types):
        ic = [ic]
    elif not isinstance(ic, (list, tuple)):
        raise ValueError("Need a list or a tuple for ic if not a string.")

    results = np.zeros((len(ic), max_ar + 1, max_ma + 1))

    for ar in ar_range:
        for ma in ma_range:
            if ar == 0 and ma == 0 and trend == 'nc':
                # ARMA(0,0) with no trend has nothing to estimate
                results[:, ar, ma] = np.nan
                continue

            mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
            if mod is None:
                # estimation failed for this order; mark cell as missing
                results[:, ar, ma] = np.nan
                continue

            for i, criteria in enumerate(ic):
                results[i, ar, ma] = getattr(mod, criteria)

    dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]

    res = dict(zip(ic, dfs))

    # add the minimums to the results dict
    min_res = {}
    for i, result in iteritems(res):
        mins = np.where(result.min().min() == result)
        min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
    res.update(min_res)

    return Bunch(**res)
def has_missing(data):
    """
    Returns True if 'data' contains missing entries, otherwise False
    """
    # NaN propagates through the sum, so one check on the total suffices
    total = np.sum(data)
    return np.isnan(total)
def kpss(x, regression='c', lags=None, store=False):
    """
    Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.

    Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
    hypothesis that x is level or trend stationary.

    Parameters
    ----------
    x : array_like, 1d
        Data series
    regression : str{'c', 'ct'}
        Indicates the null hypothesis for the KPSS test

        * 'c' : The data is stationary around a constant (default)
        * 'ct' : The data is stationary around a trend
    lags : int
        Indicates the number of lags to be used. If None (default),
        lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
        Schwert (1989).
    store : bool
        If True, then a result instance is returned additionally to
        the KPSS statistic (default is False).

    Returns
    -------
    kpss_stat : float
        The KPSS test statistic
    p_value : float
        The p-value of the test. The p-value is interpolated from
        Table 1 in Kwiatkowski et al. (1992), and a boundary point
        is returned if the test statistic is outside the table of
        critical values, that is, if the p-value is outside the
        interval (0.01, 0.1).
    lags : int
        The truncation lag parameter
    crit : dict
        The critical values at 10%, 5%, 2.5% and 1%. Based on
        Kwiatkowski et al. (1992).
    resstore : (optional) instance of ResultStore
        An instance of a dummy class with results attached as attributes

    Notes
    -----
    To estimate sigma^2 the Newey-West estimator is used. If lags is None,
    the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
    as outlined in Schwert (1989). The p-values are interpolated from
    Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
    outside the table of critical values, then a warning message is
    generated.

    Missing values are not handled.

    References
    ----------
    D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
    the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
    `Journal of Econometrics` 54, 159-178.
    """
    from warnings import warn

    nobs = len(x)
    x = np.asarray(x)
    hypo = regression.lower()

    # if m is not one, n != m * n
    if nobs != x.size:
        raise ValueError("x of shape {0} not understood".format(x.shape))

    if hypo == 'ct':
        # p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
        # where beta is the trend, r_t a random walk and e_t a stationary
        # error term.
        resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
        crit = [0.119, 0.146, 0.176, 0.216]
    elif hypo == 'c':
        # special case of the model above, where beta = 0 (so the null
        # hypothesis is that the data is stationary around r_0).
        resids = x - x.mean()
        crit = [0.347, 0.463, 0.574, 0.739]
    else:
        raise ValueError("hypothesis '{0}' not understood".format(hypo))

    if lags is None:
        # from Kwiatkowski et al. referencing Schwert (1989)
        lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))

    pvals = [0.10, 0.05, 0.025, 0.01]

    eta = sum(resids.cumsum()**2) / (nobs**2)  # eq. 11, p. 165
    s_hat = _sigma_est_kpss(resids, nobs, lags)

    kpss_stat = eta / s_hat
    # interpolate the p-value from the table of critical values; values
    # outside the table are clamped to the nearest tabulated p-value
    p_value = np.interp(kpss_stat, crit, pvals)

    if p_value == pvals[-1]:
        warn("p-value is smaller than the indicated p-value", InterpolationWarning)
    elif p_value == pvals[0]:
        warn("p-value is greater than the indicated p-value", InterpolationWarning)

    crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}

    if store:
        rstore = ResultsStore()
        rstore.lags = lags
        rstore.nobs = nobs

        stationary_type = "level" if hypo == 'c' else "trend"
        rstore.H0 = "The series is {0} stationary".format(stationary_type)
        rstore.HA = "The series is not {0} stationary".format(stationary_type)

        return kpss_stat, p_value, crit_dict, rstore
    else:
        return kpss_stat, p_value, lags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
|
bert9bert/statsmodels
|
statsmodels/tsa/stattools.py
|
Python
|
bsd-3-clause
| 44,980
|
[
"ADF"
] |
63a17d359f42fc0790767b88e97a40f3bc8fe6c2b8f20f16aa2449041ce1d24a
|
"""Solar analemma."""
from ._skyBase import RadianceSky
from ..material.light import Light
from ..geometry.source import Source
from ladybug.epw import EPW
from ladybug.sunpath import Sunpath
import os
# Python 2/3 compatibility: on Python 2 use the lazy C-level izip and write
# Radiance files in binary mode; on Python 3 the built-in zip is already
# lazy and files are written in text mode.
try:
    from itertools import izip as zip
    writemode = 'wb'
except ImportError:
    # python 3
    writemode = 'w'
class Analemma(RadianceSky):
    """Generate a radiance-based analemma.

    Use Analemma for solar access/sunlight hours studies. For annual
    daylight/radiation studies see AnalemmaReversed.

    Analemma consists of two files:
        1. *.ann file which includes sun geometries and materials.
        2. *.mod file includes list of modifiers that are included in *.ann file.
    """

    def __init__(self, sun_vectors, sun_up_hours):
        """Radiance-based analemma.

        Args:
            sun_vectors: A list of sun vectors as (x, y, z).
            sun_up_hours: List of hours of the year that corresponds to sun_vectors.

        Raises:
            AssertionError: If the two input lists have different lengths.
        """
        RadianceSky.__init__(self)
        vectors = sun_vectors or []
        # freeze the vectors as nested tuples so they cannot be mutated later
        self._sun_vectors = tuple(tuple(v) for v in vectors)
        self._sun_up_hours = sun_up_hours
        assert len(sun_up_hours) == len(vectors), \
            ValueError(
                'Length of vectors [%d] does not match the length of hours [%d]' %
                (len(vectors), len(sun_up_hours))
            )

    @classmethod
    def from_json(cls, inp):
        """Create an analemma from a dictionary.

        Args:
            inp: A dictionary with 'sun_vectors' and 'sun_up_hours' keys
                (the format produced by to_json).
        """
        return cls(inp['sun_vectors'], inp['sun_up_hours'])

    @classmethod
    def from_location(cls, location, hoys=None, north=0, is_leap_year=False):
        """Generate a radiance-based analemma for a location.

        Hours at which the sun is below the horizon are filtered out.

        Args:
            location: A ladybug location.
            hoys: A list of hours of the year (default: range(8760)).
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        sun_vectors = []
        sun_up_hours = []
        hoys = hoys or range(8760)
        north = north or 0
        sp = Sunpath.from_location(location, north)
        sp.is_leap_year = is_leap_year
        for hour in hoys:
            sun = sp.calculate_sun_from_hoy(hour)
            if sun.altitude < 0:
                # the sun is below the horizon at this hour; skip it
                continue
            sun_vectors.append(sun.sun_vector)
            sun_up_hours.append(hour)
        return cls(sun_vectors, sun_up_hours)

    @classmethod
    def from_location_sun_up_hours(cls, location, sun_up_hours, north=0,
                                   is_leap_year=False):
        """Generate a radiance-based analemma for a location.

        Unlike from_location, the input hours are used as-is without checking
        whether the sun is above the horizon.

        Args:
            location: A ladybug location.
            sun_up_hours: A list of hours of the year to be included in analemma.
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        sun_vectors = []
        north = north or 0
        sp = Sunpath.from_location(location, north)
        sp.is_leap_year = is_leap_year
        for hour in sun_up_hours:
            sun = sp.calculate_sun_from_hoy(hour)
            sun_vectors.append(sun.sun_vector)
        return cls(sun_vectors, sun_up_hours)

    @classmethod
    def from_wea(cls, wea, hoys=None, north=0, is_leap_year=False):
        """Generate a radiance-based analemma from a ladybug wea.

        NOTE: Only the location from wea will be used for creating analemma. For
        climate-based sun matrix see SunMatrix class.

        Args:
            wea: A ladybug Wea.
            hoys: A list of hours of the year (default: range(8760)).
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        return cls.from_location(wea.location, hoys, north, is_leap_year)

    @classmethod
    def from_wea_sun_up_hours(cls, wea, sun_up_hours, north=0, is_leap_year=False):
        """Generate a radiance-based analemma from a ladybug wea.

        NOTE: Only the location from wea will be used for creating analemma. For
        climate-based sun matrix see SunMatrix class.

        Args:
            wea: A ladybug Wea.
            sun_up_hours: A list of hours of the year to be included in analemma.
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        return cls.from_location_sun_up_hours(wea.location, sun_up_hours, north,
                                              is_leap_year)

    @classmethod
    def from_epw_file(cls, epw_file, hoys=None, north=0, is_leap_year=False):
        """Create an analemma from an epw file.

        NOTE: Only the location from epw file will be used for creating analemma.
        For climate-based sun matrix see SunMatrix class.

        Args:
            epw_file: Full path to an epw file.
            hoys: A list of hours of the year (default: range(8760)).
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        return cls.from_location(EPW(epw_file).location, hoys, north, is_leap_year)

    @classmethod
    def from_epw_file_sun_up_hours(cls, epw_file, sun_up_hours, north=0,
                                   is_leap_year=False):
        """Create an analemma from an epw file.

        NOTE: Only the location from epw file will be used for creating analemma.
        For climate-based sun matrix see SunMatrix class.

        Args:
            epw_file: Full path to an epw file.
            sun_up_hours: A list of hours of the year to be included in analemma.
            north: North angle from Y direction (default: 0).
            is_leap_year: A boolean to indicate if hours are for a leap year
                (default: False).
        """
        return cls.from_location_sun_up_hours(EPW(epw_file).location, sun_up_hours,
                                              north, is_leap_year)

    @property
    def isAnalemma(self):
        """Return True."""
        return True

    @property
    def is_climate_based(self):
        """Return True if generated based on values from weather file."""
        return False

    @property
    def analemma_file(self):
        """Analemma file name.

        Use this file to create the octree.
        """
        return 'analemma.rad'

    @property
    def sunlist_file(self):
        """Sun list file name.

        Use this file as the list of modifiers in rcontrib.
        """
        return 'analemma.mod'

    @property
    def sun_vectors(self):
        """Return list of sun vectors."""
        return self._sun_vectors

    @property
    def sun_up_hours(self):
        """Return list of hours for sun vectors."""
        return self._sun_up_hours

    def execute(self, working_dir):
        """Write the analemma and modifier-list files to working_dir.

        Args:
            working_dir: Target folder for the *.rad and *.mod files.
        """
        fp = os.path.join(working_dir, self.analemma_file)  # analemma file (geo and mat)
        sfp = os.path.join(working_dir, self.sunlist_file)  # modifier list
        with open(fp, writemode) as outf, open(sfp, writemode) as outm:
            for hoy, vector in zip(self.sun_up_hours, self.sun_vectors):
                # use minute of the year to name sun positions
                moy = int(round(hoy * 60))
                mat = Light('sol_%06d' % moy, 1e6, 1e6, 1e6)
                sun = Source('sun_%06d' % moy, vector, 0.533, mat)
                outf.write(sun.to_rad_string(True).replace('\n', ' ') + '\n')
                outm.write('sol_%06d\n' % moy)

    def duplicate(self):
        """Duplicate this class.

        Uses type(self) so that subclasses (e.g. AnalemmaReversed) duplicate
        as their own type instead of the base Analemma.
        """
        return type(self)(self.sun_vectors, self.sun_up_hours)

    def to_rad_string(self):
        """Get the radiance command line as a string.

        Raises:
            AttributeError: Always; an analemma has no single-line command.
        """
        raise AttributeError(
            'analemma does not have a single line command. Try execute method.'
        )

    def to_json(self):
        """Convert analemma to a dictionary."""
        return {'sun_vectors': self.sun_vectors, 'sun_up_hours': self.sun_up_hours}

    def ToString(self):
        """Overwrite .NET ToString method."""
        return self.__repr__()

    def __repr__(self):
        """Analemma representation."""
        return 'Analemma: #%d' % len(self.sun_vectors)
class AnalemmaReversed(Analemma):
    """Generate a radiance-based analemma with reversed sun vectors.

    Reversed Analemma reverses direction of input sun vectors. Use reversed
    Analemma for radiation and daylight studies.

    Analemma consists of two files:
        1. *_reversed.ann file which includes sun geometries and materials.
        2. *.mod file includes list of modifiers that are included in
            *_reversed.ann file.
    """

    @property
    def analemma_file(self):
        """Analemma file name.

        Use this file to create the octree.
        """
        return 'analemma_reversed.rad'

    def duplicate(self):
        """Duplicate this class.

        Overridden so the copy is an AnalemmaReversed; the implementation
        inherited from Analemma returns a base Analemma instance.
        """
        return AnalemmaReversed(self.sun_vectors, self.sun_up_hours)

    def execute(self, working_dir):
        """Write analemma files with reversed sun vectors to working_dir.

        Args:
            working_dir: Target folder for the *_reversed.rad and *.mod files.
        """
        fp = os.path.join(working_dir, self.analemma_file)  # analemma file (geo and mat)
        sfp = os.path.join(working_dir, self.sunlist_file)  # modifier list
        with open(fp, writemode) as outf, open(sfp, writemode) as outm:
            for hoy, vector in zip(self.sun_up_hours, self.sun_vectors):
                # use minute of the year to name sun positions
                moy = int(round(hoy * 60))
                # reverse sun vector
                r_vector = tuple(-1 * i for i in vector)
                mat = Light('sol_%06d' % moy, 1e6, 1e6, 1e6)
                sun = Source('sun_%06d' % moy, r_vector, 0.533, mat)
                outf.write(sun.to_rad_string(True).replace('\n', ' ') + '\n')
                outm.write('sol_%06d\n' % moy)
|
ladybug-analysis-tools/honeybee
|
honeybee_plus/radiance/sky/analemma.py
|
Python
|
gpl-3.0
| 9,941
|
[
"EPW"
] |
b470ee2d75c10a22443b88aefe424b8a8dda1d5e9e9403313bf0df779f08a940
|
import subprocess
import traceback
from PyQt5 import QtCore, QtWidgets, QtGui
from config import VERSION, ICONS8_URL, LOADINGIO_URL, GITHUB_URL, GPL_URL, EXE
from update import Updater
from utils import get_download_window
license_txt = None
def show_msgbox(title, msg, icon=QtWidgets.QMessageBox.NoIcon, details=None, is_traceback=False):
    """Show a modal message box on top of the download window.

    Args:
        title: Window title of the message box.
        msg: Main message text.
        icon: One of the QMessageBox icon constants (default: no icon).
        details: Optional detail text, or an exc_info-style tuple when
            is_traceback is True.
        is_traceback: Treat details as an exception triple and format it
            into a readable traceback.
    """
    box = QtWidgets.QMessageBox(parent=get_download_window())
    box.setWindowTitle(title)
    box.setIcon(icon)
    box.setText(msg)
    if details:
        detail_text = ("".join(traceback.format_exception(*details))
                       if is_traceback else details)
        box.setDetailedText(detail_text)
    box.exec()
def show_splash(pixmap, parent=None, opacity=0.97, vfont="Fira Sans", vfont_size=11, vfont_bold=True):
    """Create, configure and show a splash screen; return it to the caller.

    Args:
        pixmap: QPixmap shown as the splash image.
        parent: Optional parent widget.
        opacity: Window opacity of the splash screen.
        vfont: Font family for the version message; falsy keeps the default.
        vfont_size: Point size for the version message font.
        vfont_bold: Whether the version message is bold.
    """
    splash = QtWidgets.QSplashScreen(parent, pixmap, QtCore.Qt.WindowStaysOnTopHint)
    if vfont:
        message_font = QtGui.QFont(vfont)
    else:
        message_font = splash.font()
    message_font.setPointSize(vfont_size)
    message_font.setBold(vfont_bold)
    splash.setFont(message_font)
    splash.setWindowOpacity(opacity)
    if VERSION:
        # version string in the bottom-right corner
        splash.showMessage("v" + VERSION, QtCore.Qt.AlignRight | QtCore.Qt.AlignBottom, QtCore.Qt.red)
    splash.show()
    return splash
def show_license(lfile, fallback_msg="", is_html=False, parent=None):
    """Display the program license in a modal dialog.

    The license text is read from lfile (a QFile resource) once and cached
    in the module-level ``license_txt``; if the resource cannot be opened,
    fallback_msg is shown instead.

    Args:
        lfile: QFile pointing at the license resource.
        fallback_msg: Text used when the resource cannot be read.
        is_html: Render the text as HTML (QTextBrowser) instead of plain text.
        parent: Optional parent widget for the dialog.
    """
    global license_txt
    window_flags = (QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint
                    | QtCore.Qt.WindowMinMaxButtonsHint | QtCore.Qt.WindowCloseButtonHint)
    dlg = QtWidgets.QDialog(parent, window_flags)
    dlg.resize(600, 600)
    dlg.setWindowTitle(QtCore.QCoreApplication.translate("show_license", "License"))
    # lazily load and cache the license text on first use
    if license_txt is None:
        if lfile.open(QtCore.QIODevice.ReadOnly | QtCore.QFile.Text):
            license_txt = QtCore.QTextStream(lfile).readAll()
            lfile.close()
        else:
            license_txt = fallback_msg
    if is_html:
        viewer = QtWidgets.QTextBrowser()
        viewer.setReadOnly(True)
        viewer.setHtml(license_txt)
        viewer.setOpenExternalLinks(True)
    else:
        viewer = QtWidgets.QTextEdit()
        viewer.setReadOnly(True)
        viewer.insertPlainText(license_txt)
        # scroll back to the top after inserting the text
        cursor = viewer.textCursor()
        cursor.movePosition(QtGui.QTextCursor.Start)
        viewer.setTextCursor(cursor)
    close_btn = QtWidgets.QPushButton(QtCore.QCoreApplication.translate("show_license", "&Close"))
    close_btn.clicked.connect(dlg.close)
    text_row = QtWidgets.QHBoxLayout()
    text_row.addWidget(viewer)
    button_row = QtWidgets.QHBoxLayout()
    button_row.addWidget(close_btn)
    button_row.setAlignment(QtCore.Qt.AlignRight)
    layout = QtWidgets.QVBoxLayout()
    layout.addLayout(text_row)
    layout.addLayout(button_row)
    dlg.setLayout(layout)
    dlg.exec()
class AboutDialog(QtWidgets.QDialog):
    """Static 'About' dialog: icon, title, version, description, copyright
    and license notice, plus License / About Qt / Close buttons."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.init_ui()
    def init_ui(self):
        """Build the dialog: each hboxN is one horizontal row, stacked in vbox."""
        vbox = QtWidgets.QVBoxLayout()
        hbox1 = QtWidgets.QHBoxLayout()
        hbox2 = QtWidgets.QHBoxLayout()
        hbox3 = QtWidgets.QHBoxLayout()
        hbox4 = QtWidgets.QHBoxLayout()
        hbox5 = QtWidgets.QHBoxLayout()
        hbox6 = QtWidgets.QHBoxLayout()
        hbox7 = QtWidgets.QHBoxLayout()
        # application icon from the Qt resource file, scaled down
        self.icon = QtGui.QPixmap(":/ytdl_icon.png").scaled(
            92, 92, QtCore.Qt.IgnoreAspectRatio, QtCore.Qt.SmoothTransformation)
        self.icon_lbl = QtWidgets.QLabel()
        self.icon_lbl.setPixmap(self.icon)
        hbox1.addWidget(self.icon_lbl)
        hbox1.setAlignment(QtCore.Qt.AlignCenter)
        self.title_lbl = QtWidgets.QLabel("yt-dl")
        big_font = self.title_lbl.font()
        big_font.setPointSize(20)
        big_font.setBold(True)
        self.title_lbl.setFont(big_font)
        hbox2.addWidget(self.title_lbl)
        hbox2.setAlignment(QtCore.Qt.AlignCenter)
        self.version_lbl = QtWidgets.QLabel("v" + VERSION)
        hbox3.addWidget(self.version_lbl)
        hbox3.setAlignment(QtCore.Qt.AlignCenter)
        # NOTE(review): ICONS8_URL is passed as format argument {0} but the
        # text only references {1} and {2} -- confirm whether an "Icons:"
        # line was dropped from the translated string.
        self.desc_lbl = QtWidgets.QLabel(self.tr("An easy-to-use YouTube downloader (GUI),<br>"
                                                 "created with PyQt5, pytube and beautifulsoup4.<br>"
                                                 "Loading GIF: <a href=\"{1}\">{1}</a><br>"
                                                 "Github page: <a href=\"{2}\">{2}</a>")
                                         .format(ICONS8_URL, LOADINGIO_URL, GITHUB_URL))
        self.desc_lbl.setTextFormat(QtCore.Qt.RichText)
        self.desc_lbl.setOpenExternalLinks(True)
        self.desc_lbl.setAlignment(QtCore.Qt.AlignCenter)
        hbox4.addWidget(self.desc_lbl)
        hbox4.setAlignment(QtCore.Qt.AlignCenter)
        self.copyright_lbl = QtWidgets.QLabel("\u00A9 Franz Piontek, 2017")
        hbox5.addWidget(self.copyright_lbl)
        hbox5.setAlignment(QtCore.Qt.AlignCenter)
        self.license_note_lbl = QtWidgets.QLabel(self.tr("This program is free software: you can redistribute it "
                                                         "and/or<br>modify it under the terms of the GNU General "
                                                         "Public License as<br>published by the Free Software "
                                                         "Foundation, either version 3 of<br>the License, or "
                                                         "(at your option) any later version.<br>"
                                                         "This program is distributed in the hope that it will be "
                                                         "useful, but<br>WITHOUT ANY WARRANTY; without even the "
                                                         "implied warranty<br>of MERCHANTABILITY or FITNESS FOR A "
                                                         "PARTICULAR PURPOSE.<br>"
                                                         "See the GNU General Public License for more details<br>"
                                                         "(click \"License\" or visit <a href=\"{0}\">{0}</a>).")
                                                 .format(GPL_URL))
        self.license_note_lbl.setTextFormat(QtCore.Qt.RichText)
        self.license_note_lbl.setOpenExternalLinks(True)
        self.license_note_lbl.setAlignment(QtCore.Qt.AlignCenter)
        # slightly smaller font for the license notice
        font = self.license_note_lbl.font()
        font.setPointSize(font.pointSize() - 2)
        self.license_note_lbl.setFont(font)
        hbox6.addWidget(self.license_note_lbl)
        hbox6.setAlignment(QtCore.Qt.AlignCenter)
        self.license_btn = QtWidgets.QPushButton(self.tr("&License"))
        lfile = QtCore.QFile(":/LICENSE.html")
        fallback_msg = self.tr("LICENSE file couldn't be found/accessed.\nyt-dl used to be "
                               "under the GNU GPL v3.\n"
                               "Please update the application or visit "
                               "https://github.com/FranzPio/yt-dl for more information.")
        self.license_btn.clicked.connect(lambda: show_license(lfile, fallback_msg, is_html=True, parent=self))
        self.about_qt_btn = QtWidgets.QPushButton(self.tr("&About Qt"))
        self.about_qt_btn.clicked.connect(lambda: QtWidgets.QMessageBox.aboutQt(self, self.tr("About Qt")))
        self.close_btn = QtWidgets.QPushButton(self.tr("&Close"))
        self.close_btn.clicked.connect(self.close)
        self.close_btn.setDefault(True)
        hbox7.addWidget(self.license_btn)
        hbox7.addWidget(self.about_qt_btn)
        hbox7.addStretch(1)
        hbox7.addWidget(self.close_btn)
        hbox7.setAlignment(QtCore.Qt.AlignCenter)
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)
        vbox.addLayout(hbox4)
        vbox.addSpacing(5)
        vbox.addLayout(hbox5)
        vbox.addSpacing(7)
        vbox.addLayout(hbox6)
        vbox.addSpacing(12)
        vbox.addLayout(hbox7)
        self.setLayout(vbox)
        # lock the dialog to its natural size
        self.resize(self.sizeHint())
        self.setFixedSize(self.sizeHint())
        self.setWindowTitle(self.tr("About"))
class UpdateDialog(QtWidgets.QDialog):
    """Frameless progress dialog that runs the Updater on a worker thread."""
    # emitted after a successful update to ask the host application to restart
    restart_wanted = QtCore.pyqtSignal(bool)
    def __init__(self, parent=None):
        super().__init__(parent, QtCore.Qt.CustomizeWindowHint | QtCore.Qt.Sheet)
        self.init_ui()
        self.start_update()
    def init_ui(self):
        """Build the dialog: spinner on the left, status text and progress bar on the right."""
        hbox = QtWidgets.QHBoxLayout()
        vbox1 = QtWidgets.QVBoxLayout()
        vbox2 = QtWidgets.QVBoxLayout()
        self.loading_indicator = QtWidgets.QLabel()
        self.spinning_wheel = QtGui.QMovie(":/rolling.gif")
        self.spinning_wheel.setScaledSize(QtCore.QSize(32, 32))
        self.loading_indicator.setMovie(self.spinning_wheel)
        vbox1.addWidget(self.loading_indicator, alignment=QtCore.Qt.AlignTop)
        self.status_lbl = QtWidgets.QLabel()
        # self.update_dlg.status_lbl.setWordWrap(True)
        self.progress_bar = QtWidgets.QProgressBar()
        self.progress_bar.hide()
        # keep the layout from jumping when the bar is hidden between phases
        sp_retain_space = self.progress_bar.sizePolicy()
        sp_retain_space.setRetainSizeWhenHidden(True)
        self.progress_bar.setSizePolicy(sp_retain_space)
        vbox2.addWidget(self.status_lbl)
        vbox2.addSpacing(5)
        vbox2.addWidget(self.progress_bar)
        hbox.addLayout(vbox1)
        hbox.addSpacing(10)
        hbox.addLayout(vbox2)
        self.setLayout(hbox)
    def start_update(self):
        """Start the spinner, move the Updater to a QThread and wire its signals."""
        self.spinning_wheel.start()
        self.updater = Updater()
        self.thread = QtCore.QThread()
        # run the updater's slots off the GUI thread
        self.updater.moveToThread(self.thread)
        self.updater.finished.connect(self.close)
        self.updater.status_update.connect(self.update_status)
        self.updater.success.connect(self.success)
        self.updater.error.connect(show_msgbox)
        self.updater.information.connect(show_msgbox)
        self.updater.progress.connect(self.update_progress)
        self.thread.started.connect(self.updater.check_for_updates)
        self.thread.start()
    def update_status(self, new_status):
        """Show a new status line and hide the progress bar."""
        self.status_lbl.setText(new_status)
        self.progress_bar.hide()
        # self.resize(self.sizeHint())
        # self.update()
        # QtWidgets.qApp.processEvents()
    def update_progress(self, bytes_transferred, bytes_total):
        """Show download progress as 'X of Y MB' (bytes scaled by 1e6)."""
        self.progress_bar.show()
        self.progress_bar.setMaximum(bytes_total)
        self.progress_bar.setValue(bytes_transferred)
        self.progress_bar.setFormat(self.tr("%s of %s MB") % (round(self.progress_bar.value() / 1000000, 1),
                                                              round(self.progress_bar.maximum() / 1000000, 1)))
    def keyPressEvent(self, evt):
        # Escape closes the frameless dialog; everything else gets default handling.
        if evt.key() == QtCore.Qt.Key_Escape:
            self.close()
        else:
            QtWidgets.QDialog.keyPressEvent(self, evt)
    def closeEvent(self, evt):
        # Shut the worker thread down gracefully, falling back to terminate().
        self.thread.quit()
        self.thread.wait(100)
        if not self.thread.isFinished():
            self.thread.terminate()
            self.thread.wait(2000)
        QtWidgets.QDialog.closeEvent(self, evt)
    def success(self):
        """Close the dialog and request an application restart."""
        self.close()
        self.restart()
    def restart(self):
        # Ask the host app to restart, tear down all windows, then relaunch the exe.
        self.restart_wanted.emit(True)
        QtWidgets.qApp.processEvents()
        QtWidgets.qApp.closeAllWindows()
        QtWidgets.qApp.quit()
        subprocess.run(EXE)
|
FranzPio/yt-dl
|
src/dialogs.py
|
Python
|
gpl-3.0
| 11,382
|
[
"VisIt"
] |
9a588985c4288315241bc40a43c9c38d6f1d212e75749ced5b2c8967677eeec2
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import os.path
import platform
import sys
import wx
import itertools
#from invesalius.project import Project
# NOTE(review): `_` is presumably the gettext translation function installed
# into builtins by the application at startup -- confirm before importing
# this module standalone.
INVESALIUS_VERSION = "3.1"
#---------------
# Measurements
MEASURE_NAME_PATTERN = _("M %d")
MEASURE_LINEAR = 101
MEASURE_ANGULAR = 102
DEFAULT_MEASURE_COLOUR = (1,0,0)
DEFAULT_MEASURE_BG_COLOUR = (250/255.0, 247/255.0, 218/255.0)
DEFAULT_MEASURE_RADIUS = 1
DEFAULT_MEASURE_TYPE = MEASURE_LINEAR
PROP_MEASURE = 0.8
# Stereo rendering mode labels shown in the GUI
STEREO_OFF = _(" Off")
STEREO_RED_BLUE = _("Red-blue")
STEREO_CRISTAL = _("CristalEyes")
STEREO_INTERLACED = _("Interlaced")
STEREO_LEFT = _("Left")
STEREO_RIGHT = _("Right")
STEREO_DRESDEN = _("Dresden")
STEREO_CHECKBOARD = _("Checkboard")
STEREO_ANAGLYPH = _("Anaglyph")
# VTK text
TEXT_SIZE_SMALL = 11
TEXT_SIZE = 12
TEXT_SIZE_LARGE = 16
TEXT_COLOUR = (1,1,1)
# normalized viewport anchors used to place on-screen text
(X,Y) = (0.03, 0.97)
(XZ, YZ) = (0.05, 0.93)
TEXT_POS_LEFT_UP = (X, Y)
#------------------------------------------------------------------
TEXT_POS_LEFT_DOWN = (X, 1-Y) # SetVerticalJustificationToBottom
TEXT_POS_LEFT_DOWN_ZERO = (X, 1-YZ)
#------------------------------------------------------------------
TEXT_POS_RIGHT_UP = (1-X, Y) # SetJustificationToRight
#------------------------------------------------------------------
TEXT_POS_RIGHT_DOWN = (1-X, 1-Y) # SetVerticalJustificationToBottom &
                                 # SetJustificationToRight
#------------------------------------------------------------------
TEXT_POS_HCENTRE_DOWN = (0.5, 1-Y) # SetJustificationToCentered
                                   # SetVerticalJustificationToBottom
TEXT_POS_HCENTRE_DOWN_ZERO = (0.5, 1-YZ)
#------------------------------------------------------------------
TEXT_POS_HCENTRE_UP = (0.5, Y) # SetJustificationToCentered
#------------------------------------------------------------------
TEXT_POS_VCENTRE_RIGHT = (1-X, 0.5) # SetVerticalJustificationToCentered
                                    # SetJustificationToRight
TEXT_POS_VCENTRE_RIGHT_ZERO = (1-XZ, 0.5)
#------------------------------------------------------------------
TEXT_POS_VCENTRE_LEFT = (X, 0.5) # SetVerticalJustificationToCentered
#------------------------------------------------------------------
# Slice orientation
AXIAL = 1
CORONAL = 2
SAGITAL = 3
VOLUME = 4
SURFACE = 5
# Measure type
LINEAR = 6
ANGULAR = 7
# Colour representing each orientation
ORIENTATION_COLOUR = {'AXIAL': (1,0,0), # Red
                      'CORONAL': (0,1,0), # Green
                      'SAGITAL': (0,0,1)} # Blue
# slice-skip options offered on DICOM import
IMPORT_INTERVAL = [_("Keep all slices"), _("Skip 1 for each 2 slices"),
                   _("Skip 2 for each 3 slices"), _("Skip 3 for each 4 slices"),
                   _("Skip 4 for each 5 slices"),_("Skip 5 for each 6 slices")]
# Camera according to slice's orientation
#CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, -1, 0), "SAGITAL":(1, 0, 0)}
#CAM_VIEW_UP =  {"AXIAL":(0, 1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
AXIAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, -1, 0), "SAGITAL":(1, 0, 0)}
AXIAL_SLICE_CAM_VIEW_UP =  {"AXIAL":(0, 1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
SAGITAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, 1, 0), "SAGITAL":(-1, 0, 0)}
SAGITAL_SLICE_CAM_VIEW_UP = {"AXIAL":(0, -1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
CORONAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, 1, 0), "SAGITAL":(-1, 0, 0)}
CORONAL_SLICE_CAM_VIEW_UP = {"AXIAL":(0, -1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
# orientation -> [view_up dict, position dict] lookup used by slice viewers
SLICE_POSITION = {AXIAL:[AXIAL_SLICE_CAM_VIEW_UP, AXIAL_SLICE_CAM_POSITION],
                  SAGITAL:[SAGITAL_SLICE_CAM_VIEW_UP, SAGITAL_SLICE_CAM_POSITION],
                  CORONAL:[CORONAL_SLICE_CAM_VIEW_UP, CORONAL_SLICE_CAM_POSITION]}
#Project Status
#NEW_PROJECT = 0
#OPEN_PROJECT = 1
#CHANGE_PROJECT = 2
#SAVE_PROJECT = 3
PROJ_NEW = 0
PROJ_OPEN = 1
PROJ_CHANGE = 2
PROJ_CLOSE = 3
PROJ_MAX = 4
####
# Application usage modes
MODE_RP = 0
MODE_NAVIGATOR = 1
MODE_RADIOLOGY = 2
MODE_ODONTOLOGY = 3
#Crop box sides code
AXIAL_RIGHT = 1
AXIAL_LEFT = 2
AXIAL_UPPER = 3
AXIAL_BOTTOM = 4
SAGITAL_RIGHT = 5
SAGITAL_LEFT = 6
SAGITAL_UPPER = 7
SAGITAL_BOTTOM = 8
CORONAL_RIGHT = 9
CORONAL_LEFT = 10
CORONAL_UPPER = 11
CORONAL_BOTTOM = 12
CROP_PAN = 13
#Color Table from Slice
#NumberOfColors, SaturationRange, HueRange, ValueRange
SLICE_COLOR_TABLE = {_("Default "):(None,(0,0),(0,0),(0,1)),
                     _("Hue"):(None,(1,1),(0,1),(1,1)),
                     _("Saturation"):(None,(0,1),(0.6,0.6),(1,1)),
                     _("Desert"):(256, (1,1), (0, 0.1), (1,1)),
                     _("Rainbow"):(256,(1,1),(0,0.8),(1,1)),
                     _("Ocean"):(256,(1,1),(0.667, 0.5),(1,1)),
                     _("Inverse Gray"):(256, (0, 0), (0, 0), (1,0)),
                     }
# Volume view angle
VOL_FRONT = wx.NewId()
VOL_BACK = wx.NewId()
VOL_RIGHT = wx.NewId()
VOL_LEFT = wx.NewId()
VOL_TOP = wx.NewId()
VOL_BOTTOM = wx.NewId()
VOL_ISO = wx.NewId()
# Camera according to volume's orientation
AXIAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,0,1), VOL_BACK:(0,0,1), VOL_RIGHT:(0,0,1),\
                            VOL_LEFT:(0,0,1), VOL_TOP:(0,1,0), VOL_BOTTOM:(0,-1,0),\
                            VOL_ISO:(0,0,1)}
AXIAL_VOLUME_CAM_POSITION = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,1,0), VOL_RIGHT:(-1,0,0),\
                             VOL_LEFT:(1,0,0), VOL_TOP:(0,0,1), VOL_BOTTOM:(0,0,-1),\
                             VOL_ISO:(0.5,-1,0.5)}
SAGITAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,-1,0), VOL_RIGHT:(0,-1,1),\
                              VOL_LEFT:(0,-1,1), VOL_TOP:(1,-1,0), VOL_BOTTOM:(-1,1,0),\
                              VOL_ISO:(0,-1,0)}
SAGITAL_VOLUME_CAM_POSITION = {VOL_FRONT:(-1,0,0), VOL_BACK:(1,0,0), VOL_RIGHT:(0,0,1),\
                               VOL_LEFT:(0,0,-1), VOL_TOP:(0,-1,0), VOL_BOTTOM:(0,1,0),\
                               VOL_ISO:(-1,-0.5,-0.5)}
CORONAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,-1,0), VOL_RIGHT:(0,-1,0),\
                              VOL_LEFT:(0,-1,0), VOL_TOP:(0,1,0), VOL_BOTTOM:(0,-1,0),\
                              VOL_ISO:(0,-1,0)}
CORONAL_VOLUME_CAM_POSITION = {VOL_FRONT:(0,0,-1), VOL_BACK:(0,0,1), VOL_RIGHT:(-1,0,0),\
                               VOL_LEFT:(1,0,0), VOL_TOP:(0,-1,0), VOL_BOTTOM:(0,1,0),\
                               VOL_ISO:(0.5,-0.5,-1)}
# orientation -> [view_up dict, position dict] lookup used by the volume viewer
VOLUME_POSITION = {AXIAL: [AXIAL_VOLUME_CAM_VIEW_UP, AXIAL_VOLUME_CAM_POSITION],
                   SAGITAL: [SAGITAL_VOLUME_CAM_VIEW_UP, SAGITAL_VOLUME_CAM_POSITION],
                   CORONAL: [CORONAL_VOLUME_CAM_VIEW_UP, CORONAL_VOLUME_CAM_POSITION]}
# Mask threshold options
#proj = Project()
#THRESHOLD_RANGE = proj.threshold_modes[_("Bone")]
THRESHOLD_RANGE = [0,3033]
THRESHOLD_PRESETS_INDEX = _("Bone")
THRESHOLD_HUE_RANGE = (0, 0.6667)
THRESHOLD_INVALUE = 5000
THRESHOLD_OUTVALUE = 0
# Mask properties
MASK_NAME_PATTERN = _("Mask %d")
MASK_OPACITY = 0.40
#MASK_OPACITY = 0.35
# palette cycled through as new masks are created
MASK_COLOUR =  [[0.33, 1, 0.33],
                [1, 1, 0.33],
                [0.33, 0.91, 1],
                [1, 0.33, 1],
                [1, 0.68, 0.33],
                [1, 0.33, 0.33],
                [0.33333333333333331, 0.33333333333333331, 1.0],
                #(1.0, 0.33333333333333331, 0.66666666666666663),
                [0.74901960784313726, 1.0, 0.0],
                [0.83529411764705885, 0.33333333333333331, 1.0]]#,
                #(0.792156862745098, 0.66666666666666663, 1.0),
                #(1.0, 0.66666666666666663, 0.792156862745098), # too "light"
                #(0.33333333333333331, 1.0, 0.83529411764705885),#],
                #(1.0, 0.792156862745098, 0.66666666666666663),
                #(0.792156862745098, 1.0, 0.66666666666666663), # too "light"
                #(0.66666666666666663, 0.792156862745098, 1.0)]
# endless iterator of measurement colours; each new measure takes the next one
MEASURE_COLOUR = itertools.cycle([[1, 0, 0],
                                  [1, 0.4, 0],
                                  [0, 0, 1],
                                  [1, 0, 1],
                                  [0, 0.6, 0]])
# palette cycled through as new surfaces are created
SURFACE_COLOUR = [(0.33, 1, 0.33),
                  (1, 1, 0.33),
                  (0.33, 0.91, 1),
                  (1, 0.33, 1),
                  (1, 0.68, 0.33),
                  (1, 0.33, 0.33),
                  (0.33333333333333331, 0.33333333333333331, 1.0),
                  (1.0, 0.33333333333333331, 0.66666666666666663),
                  (0.74901960784313726, 1.0, 0.0),
                  (0.83529411764705885, 0.33333333333333331, 1.0),
                  (0.792156862745098, 0.66666666666666663, 1.0),
                  (1.0, 0.66666666666666663, 0.792156862745098),
                  (0.33333333333333331, 1.0, 0.83529411764705885),
                  (1.0, 0.792156862745098, 0.66666666666666663),
                  (0.792156862745098, 1.0, 0.66666666666666663),
                  (0.66666666666666663, 0.792156862745098, 1.0)]
# Related to slice editor brush
BRUSH_CIRCLE = 0 #
BRUSH_SQUARE = 1
DEFAULT_BRUSH_FORMAT = BRUSH_CIRCLE
BRUSH_DRAW = 0
BRUSH_ERASE = 1
BRUSH_THRESH = 2
BRUSH_THRESH_ERASE = 3
BRUSH_THRESH_ADD_ONLY = 4
BRUSH_THRESH_ERASE_ONLY = 5
DEFAULT_BRUSH_OP = BRUSH_THRESH
BRUSH_OP_NAME = [_("Draw"), _("Erase"), _("Threshold")]
BRUSH_COLOUR = (0,0,1.0)
BRUSH_SIZE = 30
BRUSH_MAX_SIZE = 100
# Surface creation values. Each element's list contains:
# 0: imagedata reformat ratio
# 1: smooth_iterations
# 2: smooth_relaxation_factor
# 3: decimate_reduction
SURFACE_QUALITY = {
    _("Low"): (3, 2, 0.3000, 0.4),
    _("Medium"): (2, 2, 0.3000, 0.4),
    _("High"): (0, 1, 0.3000, 0.1),
    _("Optimal *"): (0, 2, 0.3000, 0.4)}
DEFAULT_SURFACE_QUALITY = _("Optimal *")
SURFACE_QUALITY_LIST = [_("Low"),_("Medium"),_("High"),_("Optimal *")]
# Surface properties
SURFACE_TRANSPARENCY = 0.0
SURFACE_NAME_PATTERN = _("Surface %d")
# Imagedata - window and level presets, as (window, level) pairs
# NOTE(review): Liver (2000, -500) looks unusual for a liver window -- confirm.
WINDOW_LEVEL = {_("Abdomen"):(350,50),
                _("Bone"):(2000, 300),
                _("Brain posterior fossa"):(120,40),
                _("Brain"):(80,40),
                _("Default"):(None, None), #Control class set window and level from DICOM
                _("Emphysema"):(500,-850),
                _("Ischemia - Hard, non contrast"):(15,32),
                _("Ischemia - Soft, non contrast"):(80,20),
                _("Larynx"):(180, 80),
                _("Liver"):(2000, -500),
                _("Lung - Soft"):(1600,-600),
                _("Lung - Hard"):(1000,-600),
                _("Mediastinum"):(350,25),
                _("Manual"):(None, None), #Case the user change window and level
                _("Pelvis"): (450,50),
                _("Sinus"):(4000, 400),
                _("Vasculature - Hard"):(240,80),
                _("Vasculature - Soft"):(650,160),
                _("Contour"): (255, 127)}
REDUCE_IMAGEDATA_QUALITY = 0
# Base directory of this module; all resource paths are derived from it.
FILE_PATH = os.path.split(__file__)[0]
# Frozen (py2exe-style) executables keep resources next to the exe, three
# levels above this module; source checkouts keep them one level up.
if hasattr(sys,"frozen") and (sys.frozen == "windows_exe"\
or sys.frozen == "console_exe"):
    abs_path = os.path.abspath(FILE_PATH + os.sep + ".." + os.sep + ".." + os.sep + "..")
    ICON_DIR = os.path.join(abs_path, "icons")
    SAMPLE_DIR = os.path.join(FILE_PATH, 'samples')
    DOC_DIR = os.path.join(FILE_PATH, 'docs')
    folder=RAYCASTING_PRESETS_DIRECTORY= os.path.join(abs_path, "presets", "raycasting")
    RAYCASTING_PRESETS_COLOR_DIRECTORY = os.path.join(abs_path, "presets", "raycasting", "color_list")
else:
    ICON_DIR = os.path.abspath(os.path.join(FILE_PATH, '..', 'icons'))
    SAMPLE_DIR = os.path.abspath(os.path.join(FILE_PATH,'..', 'samples'))
    DOC_DIR = os.path.abspath(os.path.join(FILE_PATH,'..', 'docs'))
    folder=RAYCASTING_PRESETS_DIRECTORY= os.path.abspath(os.path.join(".",
                                                         "presets",
                                                         "raycasting"))
    RAYCASTING_PRESETS_COLOR_DIRECTORY = os.path.abspath(os.path.join(".",
                                                         "presets",
                                                         "raycasting",
                                                         "color_list"))
    # MAC App: inside an .app bundle the resources sit four levels up.
    if not os.path.exists(ICON_DIR):
        ICON_DIR = os.path.abspath(os.path.join(FILE_PATH, '..', '..', '..', '..', 'icons'))
        SAMPLE_DIR = os.path.abspath(os.path.join(FILE_PATH,'..', '..', '..', '..', 'samples'))
        DOC_DIR = os.path.abspath(os.path.join(FILE_PATH,'..', '..', '..', '..', 'docs'))
# Map each volume-view id to its display label and toolbar bitmap path.
ID_TO_BMP = {VOL_FRONT: [_("Front"), os.path.join(ICON_DIR, "view_front.png")],
             VOL_BACK: [_("Back"), os.path.join(ICON_DIR, "view_back.png")],
             VOL_TOP: [_("Top"), os.path.join(ICON_DIR, "view_top.png")],
             VOL_BOTTOM: [_("Bottom"), os.path.join(ICON_DIR, "view_bottom.png")],
             VOL_RIGHT: [_("Right"), os.path.join(ICON_DIR, "view_right.png")],
             VOL_LEFT: [_("Left"), os.path.join(ICON_DIR, "view_left.png")],
             VOL_ISO: [_("Isometric"), os.path.join(ICON_DIR, "view_isometric.png")]
             }
# if 1, use vtkVolumeRaycastMapper, if 0, use vtkFixedPointVolumeRayCastMapper
TYPE_RAYCASTING_MAPPER = 0
# Raycasting preset display name -> .plist preset file name.
# Fix: the _("Soft on white") entry appeared twice in the literal; the
# duplicate key silently overwrote the first occurrence and is removed.
RAYCASTING_FILES = {_("Airways"): "Airways.plist",
                    _("Airways II"): "Airways II.plist",
                    _("Black & White"): "Black & White.plist",
                    _("Bone + Skin"): "Bone + Skin.plist",
                    _("Bone + Skin II"): "Bone + Skin II.plist",
                    _("Dark bone"): "Dark Bone.plist",
                    _("Glossy"): "Glossy.plist",
                    _("Glossy II"): "Glossy II.plist",
                    _("Gold bone"): "Gold Bone.plist",
                    _("High contrast"): "High Contrast.plist",
                    _("Low contrast"): "Low Contrast.plist",
                    _("Soft on white"): "Soft on White.plist",
                    _("Mid contrast"): "Mid Contrast.plist",
                    _("MIP"): "MIP.plist",
                    _("No shading"): "No Shading.plist",
                    _("Pencil"): "Pencil.plist",
                    _("Red on white"): "Red on White.plist",
                    _("Skin on blue"): "Skin On Blue.plist",
                    _("Skin on blue II"): "Skin On Blue II.plist",
                    _("Soft + Skin"): "Soft + Skin.plist",
                    _("Soft + Skin II"): "Soft + Skin II.plist",
                    _("Soft + Skin III"): "Soft + Skin III.plist",
                    _("Soft on blue"): "Soft On Blue.plist",
                    _("Soft"): "Soft.plist",
                    _("Standard"): "Standard.plist",
                    _("Vascular"): "Vascular.plist",
                    _("Vascular II"): "Vascular II.plist",
                    _("Vascular III"): "Vascular III.plist",
                    _("Vascular IV"): "Vascular IV.plist",
                    _("Yellow bone"): "Yellow Bone.plist"}
#RAYCASTING_TYPES = [_(filename.split(".")[0]) for filename in
#                    os.listdir(folder) if
#                    os.path.isfile(os.path.join(folder,filename))]
# Per-user folders; created on first run if missing.
LOG_FOLDER = os.path.join(os.path.expanduser('~'), '.invesalius', 'logs')
if not os.path.isdir(LOG_FOLDER):
    os.makedirs(LOG_FOLDER)
folder = os.path.join(os.path.expanduser('~'), '.invesalius', 'presets')
if not os.path.isdir(folder):
    os.makedirs(folder)
USER_RAYCASTING_PRESETS_DIRECTORY = folder
# Preset names: user presets from the per-user folder plus the bundled ones.
RAYCASTING_TYPES = [_(filename.split(".")[0]) for filename in
                    os.listdir(folder) if
                    os.path.isfile(os.path.join(folder,filename))]
RAYCASTING_TYPES += RAYCASTING_FILES.keys()
RAYCASTING_TYPES.append(_(' Off'))
RAYCASTING_TYPES.sort()
RAYCASTING_OFF_LABEL = _(' Off')
RAYCASTING_TOOLS = [_("Cut plane")]
# If 0 dont't blur, 1 blur
RAYCASTING_WWWL_BLUR = 0
# search order for preset files: bundled first, then the user's folder
RAYCASTING_PRESETS_FOLDERS = (RAYCASTING_PRESETS_DIRECTORY,
                              USER_RAYCASTING_PRESETS_DIRECTORY)
####
#MODE_ZOOM = 0 #"Set Zoom Mode",
#MODE_ZOOM_SELECTION = 1 #:"Set Zoom Select Mode",
#MODE_ROTATE = 2#:"Set Spin Mode",
#MODE_MOVE = 3#:"Set Pan Mode",
#MODE_WW_WL = 4#:"Bright and contrast adjustment"}
#MODE_LINEAR_MEASURE = 5
# self.states = {0:"Set Zoom Mode", 1:"Set Zoom Select Mode",
# 2:"Set Spin Mode", 3:"Set Pan Mode",
# 4:"Bright and contrast adjustment"}
#ps.Publisher().sendMessage('Set interaction mode %d'%
# (MODE_BY_ID[id]))
#('Set Editor Mode')
#{0:"Set Change Slice Mode"}
####
# Slice-viewer interaction modes (negative values, distinct from the
# legacy zoom/rotate/pan mode table commented out above).
MODE_SLICE_SCROLL = -1
MODE_SLICE_EDITOR = -2
MODE_SLICE_CROSS = -3
############
# wx ids for each export/import file format supported by InVesalius.
FILETYPE_IV = wx.NewId()
FILETYPE_RIB = wx.NewId()
FILETYPE_STL = wx.NewId()
FILETYPE_STL_ASCII = wx.NewId()
FILETYPE_VRML = wx.NewId()
FILETYPE_OBJ = wx.NewId()
FILETYPE_VTP = wx.NewId()
FILETYPE_PLY = wx.NewId()
FILETYPE_X3D = wx.NewId()
FILETYPE_IMAGEDATA = wx.NewId()
FILETYPE_BMP = wx.NewId()
FILETYPE_JPG = wx.NewId()
FILETYPE_PNG = wx.NewId()
FILETYPE_PS = wx.NewId()
FILETYPE_POV = wx.NewId()
FILETYPE_TIF = wx.NewId()
# "rows x columns" tiling layouts (label -> (rows, cols)).
IMAGE_TILING = {"1 x 1":(1,1), "1 x 2":(1,2),
                "1 x 3":(1,3), "1 x 4":(1,4),
                "2 x 1":(2,1), "2 x 2":(2,2),
                "2 x 3":(2,3), "2 x 4":(2,4),
                "3 x 1":(3,1), "3 x 2":(3,2),
                "3 x 3":(3,3), "3 x 4":(3,4),
                "4 x 1":(4,1), "4 x 2":(4,2),
                "4 x 3":(4,3), "4 x 4":(4,4),
                "4 x 5":(4,5), "5 x 4":(5,4)}
# VTK warning display flag (0 = suppressed) -- NOTE(review): confirm where
# this value is consumed.
VTK_WARNING = 0
#----------------------------------------------------------
# wx ids for the File menu entries (one fresh id per item).
[ID_DICOM_IMPORT, ID_PROJECT_OPEN, ID_PROJECT_SAVE_AS, ID_PROJECT_SAVE,
 ID_PROJECT_CLOSE, ID_PROJECT_INFO, ID_SAVE_SCREENSHOT, ID_DICOM_LOAD_NET,
 ID_PRINT_SCREENSHOT, ID_IMPORT_OTHERS_FILES, ID_PREFERENCES, ID_DICOM_NETWORK,
 ID_TIFF_JPG_PNG, ID_VIEW_INTERPOLATED, ID_MODE_NAVIGATION, ID_ANALYZE_IMPORT,
 ID_NIFTI_IMPORT, ID_PARREC_IMPORT] = [wx.NewId() for number in range(18)]
# Stock ids are reused where wx provides them.
ID_EXIT = wx.ID_EXIT
ID_ABOUT = wx.ID_ABOUT
# Edit menu ids.
[ID_EDIT_UNDO, ID_EDIT_REDO, ID_EDIT_LIST] =\
    [wx.NewId() for number in range(3)]
# Toolbar ids.
[ID_TOOL_PROJECT, ID_TOOL_LAYOUT, ID_TOOL_OBJECT, ID_TOOL_SLICE] =\
    [wx.NewId() for number in range(4)]
# View menu ids.
[ID_TASK_BAR, ID_VIEW_FOUR] =\
    [wx.NewId() for number in range(2)]
[ID_VIEW_FULL, ID_VIEW_TEXT, ID_VIEW_3D_BACKGROUND] =\
    [wx.NewId() for number in range(3)]
ID_START = wx.NewId()
# Image transform menu ids (flip/swap axes).
ID_FLIP_X = wx.NewId()
ID_FLIP_Y = wx.NewId()
ID_FLIP_Z = wx.NewId()
ID_SWAP_XY = wx.NewId()
ID_SWAP_XZ = wx.NewId()
ID_SWAP_YZ = wx.NewId()
# Mask / segmentation tool ids.
ID_BOOLEAN_MASK = wx.NewId()
ID_CLEAN_MASK = wx.NewId()
ID_REORIENT_IMG = wx.NewId()
ID_FLOODFILL_MASK = wx.NewId()
ID_FILL_HOLE_AUTO = wx.NewId()
ID_REMOVE_MASK_PART = wx.NewId()
ID_SELECT_MASK_PART = wx.NewId()
ID_MANUAL_SEGMENTATION = wx.NewId()
ID_WATERSHED_SEGMENTATION = wx.NewId()
ID_THRESHOLD_SEGMENTATION = wx.NewId()
ID_FLOODFILL_SEGMENTATION = wx.NewId()
ID_CROP_MASK = wx.NewId()
#---------------------------------------------------------
# Interactor style states.  Generic tool states use the 1000 range,
# volume-viewer states the 2000 range, slice-viewer states the 3000 range.
STATE_DEFAULT = 1000
STATE_WL = 1001
STATE_SPIN = 1002
STATE_ZOOM = 1003
STATE_ZOOM_SL = 1004
STATE_PAN = 1005
STATE_ANNOTATE = 1006
STATE_MEASURE_DISTANCE = 1007
STATE_MEASURE_ANGLE = 1008
SLICE_STATE_CROSS = 3006
SLICE_STATE_SCROLL = 3007
SLICE_STATE_EDITOR = 3008
SLICE_STATE_WATERSHED = 3009
SLICE_STATE_REORIENT = 3010
SLICE_STATE_MASK_FFILL = 3011
SLICE_STATE_REMOVE_MASK_PARTS = 3012
SLICE_STATE_SELECT_MASK_PARTS = 3013
SLICE_STATE_FFILL_SEGMENTATION = 3014
SLICE_STATE_CROP_MASK = 3015
VOLUME_STATE_SEED = 2001
# STATE_LINEAR_MEASURE = 3001
# STATE_ANGULAR_MEASURE = 3002
# Tool states available in every viewer.
TOOL_STATES = [STATE_WL, STATE_SPIN, STATE_ZOOM,
               STATE_ZOOM_SL, STATE_PAN, STATE_MEASURE_DISTANCE,
               STATE_MEASURE_ANGLE]  # , STATE_ANNOTATE]
TOOL_SLICE_STATES = [SLICE_STATE_CROSS, SLICE_STATE_SCROLL,
                     SLICE_STATE_REORIENT]
# Full set of styles accepted by the slice viewers.
SLICE_STYLES = TOOL_STATES + TOOL_SLICE_STATES
SLICE_STYLES.append(STATE_DEFAULT)
SLICE_STYLES.append(SLICE_STATE_EDITOR)
SLICE_STYLES.append(SLICE_STATE_WATERSHED)
SLICE_STYLES.append(SLICE_STATE_MASK_FFILL)
SLICE_STYLES.append(SLICE_STATE_REMOVE_MASK_PARTS)
SLICE_STYLES.append(SLICE_STATE_SELECT_MASK_PARTS)
SLICE_STYLES.append(SLICE_STATE_FFILL_SEGMENTATION)
SLICE_STYLES.append(SLICE_STATE_CROP_MASK)
# Full set of styles accepted by the volume (3D) viewer.
VOLUME_STYLES = TOOL_STATES + [VOLUME_STATE_SEED, STATE_MEASURE_DISTANCE,
                               STATE_MEASURE_ANGLE]
VOLUME_STYLES.append(STATE_DEFAULT)
# Priority level per style -- NOTE(review): presumably higher levels take
# precedence on the style stack; confirm against the consumer.
STYLE_LEVEL = {SLICE_STATE_EDITOR: 1,
               SLICE_STATE_WATERSHED: 1,
               SLICE_STATE_MASK_FFILL: 2,
               SLICE_STATE_REMOVE_MASK_PARTS: 2,
               SLICE_STATE_SELECT_MASK_PARTS: 2,
               SLICE_STATE_FFILL_SEGMENTATION: 2,
               SLICE_STATE_CROSS: 2,
               SLICE_STATE_SCROLL: 2,
               SLICE_STATE_REORIENT: 2,
               SLICE_STATE_CROP_MASK: 1,
               STATE_ANNOTATE: 2,
               STATE_DEFAULT: 0,
               STATE_MEASURE_ANGLE: 2,
               STATE_MEASURE_DISTANCE: 2,
               STATE_WL: 2,
               STATE_SPIN: 2,
               STATE_ZOOM: 2,
               STATE_ZOOM_SL: 2,
               STATE_PAN: 2,
               VOLUME_STATE_SEED: 1}
#------------ Prefereces options key ------------
# ------------ Preferences option keys ------------
RENDERING = 0
SURFACE_INTERPOLATION = 1
LANGUAGE = 2
SLICE_INTERPOLATION = 3
# Correlation extracted from pyDicom: maps the DICOM Specific Character Set
# value to the corresponding Python codec name.
DICOM_ENCODING_TO_PYTHON = {
    'None': 'iso8859',
    None: 'iso8859',
    '': 'iso8859',
    'ISO_IR 6': 'iso8859',
    'ISO_IR 100': 'latin_1',
    'ISO 2022 IR 87': 'iso2022_jp',
    'ISO 2022 IR 13': 'iso2022_jp',
    'ISO 2022 IR 149': 'euc_kr',
    'ISO_IR 192': 'UTF8',
    'GB18030': 'GB18030',
    'ISO_IR 126': 'iso_ir_126',
    'ISO_IR 127': 'iso_ir_127',
    'ISO_IR 138': 'iso_ir_138',
    'ISO_IR 144': 'iso_ir_144',
}
#-------------------- Projections type ----------------
# -------------------- Projection types ----------------
PROJECTION_NORMAL = 0
PROJECTION_MaxIP = 1
PROJECTION_MinIP = 2
PROJECTION_MeanIP = 3
PROJECTION_LMIP = 4
PROJECTION_MIDA = 5
PROJECTION_CONTOUR_MIP = 6
PROJECTION_CONTOUR_LMIP = 7
PROJECTION_CONTOUR_MIDA = 8
# ------------ Projection defaults ------------------
PROJECTION_BORDER_SIZE = 1.0
PROJECTION_MIP_SIZE = 2
# ------------- Boolean operations ------------------
BOOLEAN_UNION = 1
BOOLEAN_DIFF = 2
BOOLEAN_AND = 3
BOOLEAN_XOR = 4
# ------------ Navigation defaults -------------------
# Tracker device identifiers; each one indexes the TRACKER label list below.
SELECT = 0
MTC = 1
FASTRAK = 2
ISOTRAKII = 3
PATRIOT = 4
DEBUGTRACK = 5
DISCTRACK = 6
DEFAULT_TRACKER = SELECT
TRACKER = [_("Select tracker:"), _("Claron MicronTracker"),
           _("Polhemus FASTRAK"), _("Polhemus ISOTRAK II"),
           _("Polhemus PATRIOT"), _("Debug tracker"),
           _("Disconnect tracker")]
# Navigation reference modes.
STATIC_REF = 0
DYNAMIC_REF = 1
DEFAULT_REF_MODE = DYNAMIC_REF
REF_MODE = [_("Static ref."), _("Dynamic ref.")]
# wx ids for the fiducial selection buttons (image and tracker sides).
IR1 = wx.NewId()
IR2 = wx.NewId()
IR3 = wx.NewId()
TR1 = wx.NewId()
TR2 = wx.NewId()
TR3 = wx.NewId()
SET = wx.NewId()
# Image fiducials: left ear (LEI), right ear (REI), nasion (NAI).
BTNS_IMG = {IR1: {0: _('LEI')},
            IR2: {1: _('REI')},
            IR3: {2: _('NAI')}}
BTNS_IMG_MKS = {IR1: {0: 'LEI'},
                IR2: {1: 'REI'},
                IR3: {2: 'NAI'}}
TIPS_IMG = [wx.ToolTip(_("Select left ear in image")),
            wx.ToolTip(_("Select right ear in image")),
            wx.ToolTip(_("Select nasion in image"))]
# Tracker fiducials: left ear (LET), right ear (RET), nasion (NAT), plus the
# "show set coordinates" button (SET).
BTNS_TRK = {TR1: {3: _('LET')},
            TR2: {4: _('RET')},
            TR3: {5: _('NAT')},
            SET: {6: _('SET')}}
TIPS_TRK = [wx.ToolTip(_("Select left ear with spatial tracker")),
            wx.ToolTip(_("Select right ear with spatial tracker")),
            wx.ToolTip(_("Select nasion with spatial tracker")),
            wx.ToolTip(_("Show set coordinates in image"))]
# MicronTracker calibration and marker file folders.
CAL_DIR = os.path.abspath(os.path.join(FILE_PATH, '..', 'navigation', 'mtc_files', 'CalibrationFiles'))
MAR_DIR = os.path.abspath(os.path.join(FILE_PATH, '..', 'navigation', 'mtc_files', 'Markers'))
|
fabio-otsuka/invesalius3
|
invesalius/constants.py
|
Python
|
gpl-2.0
| 25,058
|
[
"VTK"
] |
52005fe0d876b136f93374039fc519adc48970a07ed58deb35cc285f1523f543
|
# -*- coding: utf-8 -*-"""
"""
Created on Wed Aug 6 11:06:31 2014
@author: schmidan
"""
import ctypes
import numpy as np
def printCov():
    """Print the covariance functions supported by the libgp backend."""
    # Bug fix: the previous version used \" escapes inside an r"..." raw
    # string, so literal backslashes were printed in the usage example.
    print(r""" available covariance functions:
    cov functions:
    CovLinearard
    CovLinearone
    CovMatern3iso
    CovMatern5iso
    CovNoise
    CovRQiso
    CovSEard
    CovSEiso
    CovPeriodicMatern3iso
    CovPeriodic
    CovSum
    CovProd
    InputDimFilter
    use like:
    "CovSum ( CovSEiso, CovNoise)"
    """)
class GP(object):
    '''Gaussian Process class (ctypes interface to the libgaupro2 C library).

    available covariance functions:
    cov functions:
        CovLinearard
        CovLinearone
        CovMatern3iso
        CovMatern5iso
        CovNoise
        CovRQiso
        CovSEard
        CovSEiso
        CovPeriodicMatern3iso
        CovPeriodic
        CovSum
        CovProd
        InputDimFilter
    use like:
        "CovSum ( CovSEiso, CovNoise)"
    '''

    def __init__(self, ndim, covf):
        """Create a GP for `ndim`-dimensional inputs.

        :param ndim: input dimensionality.
        :param covf: covariance function description string, e.g.
                     "CovSum ( CovSEiso, CovNoise)".
        """
        print('gaupro v0.1')
        self.libgp = ctypes.cdll.LoadLibrary('libgaupro2.so')
        self.new = self.libgp.gp_new
        self.new.restype = ctypes.c_void_p
        self.new.argtypes = [ctypes.c_size_t, ctypes.c_char_p]
        self.libgp_ptr = self.new(ndim, covf.encode("ascii"))
        # Sanity query (also installs argtypes for subsequent calls).
        self.libgp.gp_get_loghyperparam_dim.argtypes = [ctypes.c_void_p]
        self.libgp.gp_get_loghyperparam_dim(self.libgp_ptr)
        self.ndim = ndim
        self.covf = covf
        self.parameters = {'covf': covf, 'ndim': ndim}
        # Bug fix: initialise the flag checked by predict().  Previously
        # calling predict() before train() raised AttributeError because
        # self.trained was only ever assigned inside train().
        self.trained = False

    def printSolvers(self):
        """Print the optimizers supported by the C library."""
        self.libgp.gp_printSolvers(ctypes.c_void_p(self.libgp_ptr))

    def train(self, x, y, optimizer="rprop", opti_iters=100, eps_stop=0.0):
        """Add training data and optimize the hyperparameters.

        :param x: training inputs, shape (n, ndim) or (n,).
        :param y: training targets, shape (n,).
        :param optimizer: "rprop" or "cg".
        :param opti_iters: number of optimizer iterations.
        :param eps_stop: early-stopping tolerance.

        On success stores the per-iteration likelihoods in
        self.likelihood_curve and sets self.trained.
        """
        self.parameters['optimizer'] = optimizer
        self.parameters['opti_iters'] = opti_iters
        if x.shape[0] == y.shape[0]:
            self.libgp.gp_add_train(ctypes.c_void_p(self.libgp_ptr), x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                    ctypes.c_int(len(x.shape)), x.ctypes.shape_as(ctypes.c_int),
                                    y.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
                                    )
        # set hyperparam starting point before optimizing
            self.optimize = self.libgp.gp_optimize
            self.optimize.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_double]
            self.optimize.restype = np.ctypeslib.ndpointer(dtype=ctypes.c_double, shape=(opti_iters,))
            self.likelihood_curve = self.optimize(self.libgp_ptr, optimizer.encode("ascii"), opti_iters, eps_stop)
            print("log_likelihood = " + str(self.get_log_likelihood()))
            self.trained = True
        else:
            # Bug fix: the old message said "x.shape[1]" although the check
            # above compares x.shape[0] with y.shape[0].
            print("ERROR: x.shape[0] must equal y.shape[0]")

    def predict(self, x):
        """Predict mean values and variances at the query inputs `x`.

        :param x: query inputs; 1-D array for a single point, 2-D array for
                  a batch of points.
        :return: (values, variances) as numpy arrays, or None (with an
                 error message) if the model has not been trained.
        """
        if self.trained:
            self.pred = self.libgp.gp_predict_value
            if len(x.shape) == 1:
                # Single query point: the library returns [value, variance].
                self.pred.restype = np.ctypeslib.ndpointer(dtype=ctypes.c_double, shape=(1 + 1,))
                self.pred.argtypes = [ctypes.c_void_p, np.ctypeslib.ndpointer(ctypes.c_double),
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
                values_variance = self.pred(self.libgp_ptr, x, len(x.shape), x.ctypes.shape_as(ctypes.c_int))
                self.predicted_values = np.copy(values_variance[0])
                self.predicted_variances = np.copy(values_variance[1])
                return self.predicted_values, self.predicted_variances
            else:
                # Batch query: the first half of the returned buffer holds
                # the values, the second half the variances.
                self.pred.restype = np.ctypeslib.ndpointer(dtype=ctypes.c_double, shape=(x.shape[0] + x.shape[0],))
                self.pred.argtypes = [ctypes.c_void_p, np.ctypeslib.ndpointer(ctypes.c_double),
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
                values_variance = self.pred(self.libgp_ptr, x, len(x.shape), x.ctypes.shape_as(ctypes.c_int))
                self.predicted_values = np.copy(values_variance[0:x.shape[0]])
                self.predicted_variances = np.copy(values_variance[x.shape[0]:])
                return self.predicted_values, self.predicted_variances
        else:
            # Bug fix: typo "need trainign before".
            print("ERROR: the model needs training before predicting")

    def get_loghyper_dims(self):
        """Return the number of log-hyperparameters of the covariance."""
        self.libgp.gp_get_loghyperparam_dim.restype = ctypes.c_int
        self.libgp.gp_get_loghyperparam_dim.argtypes = [ctypes.c_void_p]
        return self.libgp.gp_get_loghyperparam_dim(self.libgp_ptr)

    def set_loghyper(self, loghyperparam):
        """Set the log-hyperparameters from a 1-D numpy array of matching size."""
        self.libgp.gp_get_loghyperparam_dim.restype = ctypes.c_int
        self.libgp.gp_get_loghyperparam_dim.argtypes = [ctypes.c_void_p]
        dim = self.libgp.gp_get_loghyperparam_dim(self.libgp_ptr)
        if (len(loghyperparam.shape) == 1 and loghyperparam.shape[0] == dim):
            self.libgp.gp_set_loghyper(ctypes.c_void_p(self.libgp_ptr), loghyperparam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                       ctypes.c_int(loghyperparam.shape[0]))
        else:
            print("loghyperparam dims do not match (python)")

    def set_constraints(self, lowerConstraints, upperConstraints):
        """setting the constraints for the hyperparameters.

        dimensions need to match with the dims of the hyperparameters

        :param lowerConstraints: The lower constraint.
        :param upperConstraints: The upper constraint.
        """
        self.libgp.gp_get_loghyperparam_dim.restype = ctypes.c_int
        self.libgp.gp_get_loghyperparam_dim.argtypes = [ctypes.c_void_p]
        dim = self.libgp.gp_get_loghyperparam_dim(self.libgp_ptr)
        if (len(lowerConstraints.shape) == 1 and lowerConstraints.shape[0] == dim):
            if (len(upperConstraints.shape) == 1 and upperConstraints.shape[0] == dim):
                # C signature: set_constraints(const double lower[], const double upper[])
                self.libgp.gp_set_loghyper_constraints(ctypes.c_void_p(self.libgp_ptr),
                                                       lowerConstraints.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                                       upperConstraints.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                                       ctypes.c_int(lowerConstraints.shape[0]))
            else:
                print('Error (python): upperConstraints.shape not matching hyperparams shape')
        else:
            print("Error (python): lowerConstraints.shape not matching hyperparams shape")

    def get_loghyper(self):
        """Return the current log-hyperparameters as a numpy array."""
        self.libgp.gp_get_loghyper_len.restype = ctypes.c_int
        self.libgp.gp_get_loghyper_len.argtypes = [ctypes.c_void_p]
        lengths = self.libgp.gp_get_loghyper_len(self.libgp_ptr)
        self.libgp.gp_get_loghyper.restype = np.ctypeslib.ndpointer(dtype=ctypes.c_double, shape=(lengths,))
        self.libgp.gp_get_loghyper.argtypes = [ctypes.c_void_p, ctypes.c_int]
        self.loghyperparam = self.libgp.gp_get_loghyper(self.libgp_ptr, lengths)
        return self.loghyperparam

    def get_log_likelihood(self):
        """Return the current log marginal likelihood."""
        self.libgp.gp_get_loglikelihood.restype = ctypes.c_double
        self.libgp.gp_get_loglikelihood.argtypes = [ctypes.c_void_p]
        return self.libgp.gp_get_loglikelihood(self.libgp_ptr)
|
Luk0r/gaupro
|
python/gaupro2.py
|
Python
|
mit
| 9,196
|
[
"Gaussian"
] |
7c7d40647e4b1b35b11e1b9034a27020d966645ad9f0cbdb112a25d6725025d2
|
"""
Analysis of the origin of the diffraction pattern based on indexed and
measured intensities.
"""
from __future__ import annotations
def cctbx_crystal_from_dials(crystal):
    """Build a cctbx crystal symmetry object from a DIALS crystal model."""
    from cctbx.crystal import symmetry as crystal_symmetry

    sg_symbol = crystal.get_space_group().type().lookup_symbol()
    return crystal_symmetry(crystal.get_unit_cell(), sg_symbol)
def cctbx_i_over_sigi_ms_from_dials_data(reflections, cctbx_crystal_symmetry):
    """Return a miller array of summed intensities and sigmas, keeping only
    reflections with a positive summation variance."""
    from cctbx.miller import set as miller_set
    from dials.array_family import flex

    selected = reflections.select(reflections["intensity.sum.variance"] > 0)
    indices = selected["miller_index"]
    return miller_set(cctbx_crystal_symmetry, indices).array(
        data=selected["intensity.sum.value"],
        sigmas=flex.sqrt(selected["intensity.sum.variance"]),
    )
def offset_miller_indices(indices, offset):
    """Return the miller indices shifted by a constant (h, k, l) offset."""
    from dials.array_family import flex

    shifted_parts = (indices.as_vec3_double() + offset).parts()
    return flex.miller_index(*(part.iround() for part in shifted_parts))
def compute_miller_set_correlation(
    ms_a, ms_b, map_to_asu=False, merge_equivalents=False
):
    """Correlate the common reflections of two miller arrays.

    Args:
        ms_a (cctbx.miller.array): First input miller array.
        ms_b (cctbx.miller.array): Second input miller array.
        map_to_asu (bool): When ``True``, map the miller indices of both
            arrays to the asymmetric unit before matching them.
        merge_equivalents (bool): When ``True``, merge symmetry-equivalent
            reflections before matching.

    Returns:
        tuple[int, float]: Number of common observations and the
        correlation coefficient between them.
    """
    if map_to_asu:
        # Not obvious that this will help, for the reason given below.
        ms_a, ms_b = ms_a.map_to_asu(), ms_b.map_to_asu()
    if merge_equivalents:
        # Only sensible for essentially "scaled" data - otherwise the smooth
        # fall-off of intensity with resolution dominates the Wilson plot and
        # CC comes out near 1 regardless.
        ms_a, ms_b = ms_a.merge_equivalents().array(), ms_b.merge_equivalents().array()
    matched_a, matched_b = ms_a.common_sets(ms_b)
    return matched_a.size(), matched_a.correlation(matched_b).coefficient()
def get_hkl_offset_correlation_coefficients(
    dials_reflections,
    dials_crystal,
    map_to_asu=False,
    grid_h=0,
    grid_k=0,
    grid_l=0,
    reference=None,
):
    """Correlate intensities against themselves (or a reference data set)
    under small integer (h, k, l) offsets, to analyse the indexing origin.

    Returns (offsets, ccs, nref): flex arrays of the tested hkl offsets,
    the correlation coefficients and the numbers of common observations.
    """
    # N.B. deliberately ignoring d_min, d_max as these are inconsistent with
    # changing the miller indices
    from cctbx import sgtbx
    from cctbx.miller import set as miller_set
    from dials.array_family import flex

    cs = cctbx_crystal_from_dials(dials_crystal)
    ms = cctbx_i_over_sigi_ms_from_dials_data(dials_reflections, cs)
    if reference:
        reference_ms = cctbx_i_over_sigi_ms_from_dials_data(reference, cs)
    else:
        reference_ms = None
    ccs = flex.double()
    offsets = flex.vec3_int()
    nref = flex.size_t()
    if reference:
        # Compare against the external reference with indices unchanged.
        cb_op = sgtbx.change_of_basis_op("x,y,z")
    else:
        # Self-comparison: correlate with the inverted (-h, -k, -l) indices.
        cb_op = sgtbx.change_of_basis_op("-x,-y,-z")
    # Cartesian grid of candidate hkl offsets.
    hkl_test = [
        (h, k, l)
        for h in range(-grid_h, grid_h + 1)
        for k in range(-grid_k, grid_k + 1)
        for l in range(-grid_l, grid_l + 1)
    ]
    for hkl in hkl_test:
        indices = offset_miller_indices(ms.indices(), hkl)
        reindexed_indices = cb_op.apply(indices)
        rms = miller_set(cs, reindexed_indices).array(ms.data())
        if reference_ms:
            _ms = reference_ms
        else:
            _ms = miller_set(cs, indices).array(ms.data())
        n, cc = compute_miller_set_correlation(_ms, rms, map_to_asu=map_to_asu)
        ccs.append(cc)
        offsets.append(hkl)
        nref.append(n)
    return offsets, ccs, nref
|
dials/dials
|
algorithms/symmetry/origin.py
|
Python
|
bsd-3-clause
| 3,885
|
[
"CRYSTAL"
] |
9f73b5d30b661fddfc1d55c793b624fd864615fdde93a0a40187a1fddd7173f7
|
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Computer vision functions (FAST, ORB, etc)
"""
from .library import *
from .array import *
from .features import *
def fast(image, threshold=20.0, arc_length=9, non_max=True, feature_ratio=0.05, edge=3):
    """
    FAST feature detector.

    Parameters
    ----------
    image : af.Array
        A 2D array representing an image.
    threshold : scalar. optional. default: 20.0.
        FAST threshold for which a pixel of the circle around a central pixel is considered.
    arc_length : scalar. optional. default: 9
        The minimum length of arc length to be considered. Max length should be 16.
    non_max : Boolean. optional. default: True
        A boolean flag specifying if non max suppression has to be performed.
    feature_ratio : scalar. optional. default: 0.05 (5%)
        Specifies the maximum ratio of features to pixels in the image.
    edge : scalar. optional. default: 3.
        Specifies the number of edge rows and columns to be ignored.

    Returns
    -------
    features : af.Features()
        Contains the location and score. Orientation and size are not computed.
    """
    out = Features()
    safe_call(backend.get().af_fast(c_pointer(out.feat),
                                    image.arr, c_float_t(threshold), c_uint_t(arc_length), non_max,
                                    c_float_t(feature_ratio), c_uint_t(edge)))
    return out
def harris(image, max_corners=500, min_response=1E5, sigma=1.0, block_size=0, k_thr=0.04):
    """
    Harris corner detector.

    Parameters
    ----------
    image : af.Array
        2D array containing the input image.
    max_corners : scalar. optional. default: 500.
        Upper bound on the number of corners returned.
    min_response : scalar. optional. default: 1E5
        Minimum response score for a point to count as a corner.
    sigma : scalar. optional. default: 1.0
        Standard deviation of the circular window; only used when
        block_size == 0 and must be in [0.5, 5.0].
    block_size : scalar. optional. default: 0
        Square window size; used instead of sigma when non-zero.
    k_thr : scalar. optional. default: 0.04
        Harris constant; must be >= 0.01.

    Returns
    -------
    features : af.Features()
        Locations and scores of the detected corners. Orientation and size
        are not computed.

    Note
    ----
    The covariation window is square when `block_size` is used and circular
    when `sigma` is used.
    """
    detected = Features()
    harris_args = (c_pointer(detected.feat), image.arr,
                   c_uint_t(max_corners), c_float_t(min_response),
                   c_float_t(sigma), c_uint_t(block_size), c_float_t(k_thr))
    safe_call(backend.get().af_harris(*harris_args))
    return detected
def orb(image, threshold=20.0, max_features=400, scale = 1.5, num_levels = 4, blur_image = False):
    """
    ORB Feature descriptor.

    Parameters
    ----------
    image : af.Array
        A 2D array representing an image.
    threshold : scalar. optional. default: 20.0.
        FAST threshold for which a pixel of the circle around a central pixel is considered.
    max_features : scalar. optional. default: 400.
        Specifies the maximum number of features to be considered.
    scale : scalar. optional. default: 1.5.
        Specifies the factor by which images are down scaled at each level.
    num_levels : scalar. optional. default: 4.
        Specifies the number of levels used in the image pyramid.
    blur_image : Boolean. optional. default: False.
        Flag specifying if the input has to be blurred before computing descriptors.
        A gaussian filter with sigma = 2 is applied if True.

    Returns
    -------
    (features, descriptor) : tuple of (af.Features(), af.Array)
        - descriptor is an af.Array of size N x 8
    """
    feat = Features()
    desc = Array()
    safe_call(backend.get().af_orb(c_pointer(feat.feat), c_pointer(desc.arr), image.arr,
                                   c_float_t(threshold), c_uint_t(max_features),
                                   c_float_t(scale), c_uint_t(num_levels), blur_image))
    return feat, desc
def hamming_matcher(query, database, dim = 0, num_nearest = 1):
    """
    Nearest-neighbour matching under the Hamming distance.

    Parameters
    ----------
    query : af.Array
        Feature descriptors to look up.
    database : af.Array
        Multi-dimensional array holding the descriptor database.
    dim : scalar. optional. default: 0.
        Dimension along which each feature descriptor lies.
    num_nearest : scalar. optional. default: 1.
        Number of nearest neighbours to return per query.

    Returns
    -------
    (location, distance) : tuple of af.Array
        Indices of the closest matches and their Hamming distances.
    """
    matched_idx = Array()
    matched_dist = Array()
    safe_call(backend.get().af_hamming_matcher(
        c_pointer(matched_idx.arr), c_pointer(matched_dist.arr),
        query.arr, database.arr,
        c_dim_t(dim), c_dim_t(num_nearest)))
    return matched_idx, matched_dist
def nearest_neighbour(query, database, dim = 0, num_nearest = 1, match_type=MATCH.SSD):
    """
    Nearest-neighbour matching with a configurable distance metric.

    Parameters
    ----------
    query : af.Array
        Feature descriptors to look up.
    database : af.Array
        Multi-dimensional array holding the descriptor database.
    dim : scalar. optional. default: 0.
        Dimension along which each feature descriptor lies.
    num_nearest : scalar. optional. default: 1.
        Number of nearest neighbours to return per query.
    match_type : optional: af.MATCH. default: af.MATCH.SSD
        Distance metric used for matching.

    Returns
    -------
    (location, distance) : tuple of af.Array
        Indices of the closest matches and their distances.
    """
    nn_idx = Array()
    nn_dist = Array()
    nn_args = (c_pointer(nn_idx.arr), c_pointer(nn_dist.arr),
               query.arr, database.arr,
               c_dim_t(dim), c_dim_t(num_nearest), match_type.value)
    safe_call(backend.get().af_nearest_neighbour(*nn_args))
    return nn_idx, nn_dist
def match_template(image, template, match_type = MATCH.SAD):
    """
    Locate the closest match of a template within an image.

    Parameters
    ----------
    image : af.Array
        Multi-dimensional array holding an image or a batch of images.
    template : af.Array
        Multi-dimensional array holding a template or a batch of templates.
    match_type : optional: af.MATCH. default: af.MATCH.SAD
        Distance metric used for matching.

    Returns
    -------
    out : af.Array
        Matching score at each pixel position.
    """
    score_map = Array()
    safe_call(backend.get().af_match_template(
        c_pointer(score_map.arr), image.arr, template.arr, match_type.value))
    return score_map
def susan(image, radius=3, diff_thr=32, geom_thr=10, feature_ratio=0.05, edge=3):
    """
    SUSAN corner detector.

    Parameters
    ----------
    image : af.Array
        A 2D array specifying an image.
    radius : scalar. optional. default: 3.
        Specifies the radius of each pixel neighborhood.
    diff_thr : scalar. optional. default: 32
        Specifies the intensity difference threshold.
    geom_thr : scalar. optional. default: 10
        Specifies the geometric threshold.
    feature_ratio : scalar. optional. default: 0.05 (5%)
        Specifies the ratio of corners found to number of pixels.
    edge : scalar. optional. default: 3
        Specifies the number of edge rows and columns that are ignored.

    Returns
    -------
    features : af.Features()
        Contains the location and score. Orientation and size are not computed.
    """
    out = Features()
    safe_call(backend.get().af_susan(c_pointer(out.feat),
                                     image.arr, c_uint_t(radius), c_float_t(diff_thr),
                                     c_float_t(geom_thr), c_float_t(feature_ratio),
                                     c_uint_t(edge)))
    return out
def dog(image, radius1, radius2):
    """
    Difference-of-gaussians filter.

    Parameters
    ----------
    image : af.Array
        A 2D array specifying an image.
    radius1 : scalar.
        Radius of the first gaussian kernel.
    radius2 : scalar.
        Radius of the second gaussian kernel.

    Returns
    -------
    out : af.Array
        Difference of the two gaussian-filtered images.

    Note
    ----
    Each sigma value is calculated as 0.25 * radius.
    """
    filtered = Array()
    safe_call(backend.get().af_dog(
        c_pointer(filtered.arr), image.arr, radius1, radius2))
    return filtered
def sift(image, num_layers=3, contrast_threshold=0.04, edge_threshold=10.0, initial_sigma = 1.6,
         double_input = True, intensity_scale = 0.00390625, feature_ratio = 0.05):
    """
    SIFT keypoint detector and descriptor extractor.

    Parameters
    ----------
    image : af.Array
        A 2D array representing an image.
    num_layers : optional: integer. Default: 3
        Layers per octave; the octave count is derived internally.
    contrast_threshold : optional: float. Default: 0.04
        Rejects features with low contrast.
    edge_threshold : optional: float. Default: 10.0
        Rejects features that are too edge-like.
    initial_sigma : optional: float. Default: 1.6
        Sigma used to filter the input image at the first octave.
    double_input : optional: bool. Default: True
        When True, the input is upscaled to double size for the first octave.
    intensity_scale : optional: float. Default: 1.0/255
        Inverse of the intensity range of the input.
    feature_ratio : optional: float. Default: 0.05
        Maximum number of features as a ratio of image pixels.

    Returns
    -------
    (features, descriptor) : tuple of (af.Features(), af.Array)
        - descriptor is an af.Array of size N x 128
    """
    keypoints = Features()
    descriptors = Array()
    sift_args = (c_pointer(keypoints.feat), c_pointer(descriptors.arr), image.arr,
                 num_layers, c_float_t(contrast_threshold), c_float_t(edge_threshold),
                 c_float_t(initial_sigma), double_input,
                 c_float_t(intensity_scale), c_float_t(feature_ratio))
    safe_call(backend.get().af_sift(*sift_args))
    return (keypoints, descriptors)
def gloh(image, num_layers=3, contrast_threshold=0.04, edge_threshold=10.0, initial_sigma = 1.6,
         double_input = True, intensity_scale = 0.00390625, feature_ratio = 0.05):
    """
    GLOH keypoint detector and descriptor extractor.

    Parameters
    ----------
    image : af.Array
        A 2D array representing an image.
    num_layers : optional: integer. Default: 3
        Layers per octave; the octave count is derived internally.
    contrast_threshold : optional: float. Default: 0.04
        Rejects features with low contrast.
    edge_threshold : optional: float. Default: 10.0
        Rejects features that are too edge-like.
    initial_sigma : optional: float. Default: 1.6
        Sigma used to filter the input image at the first octave.
    double_input : optional: bool. Default: True
        When True, the input is upscaled to double size for the first octave.
    intensity_scale : optional: float. Default: 1.0/255
        Inverse of the intensity range of the input.
    feature_ratio : optional: float. Default: 0.05
        Maximum number of features as a ratio of image pixels.

    Returns
    -------
    (features, descriptor) : tuple of (af.Features(), af.Array)
        - descriptor is an af.Array of size N x 272
    """
    keypoints = Features()
    descriptors = Array()
    gloh_args = (c_pointer(keypoints.feat), c_pointer(descriptors.arr), image.arr,
                 num_layers, c_float_t(contrast_threshold),
                 c_float_t(edge_threshold), c_float_t(initial_sigma),
                 double_input, c_float_t(intensity_scale),
                 c_float_t(feature_ratio))
    safe_call(backend.get().af_gloh(*gloh_args))
    return (keypoints, descriptors)
def homography(x_src, y_src, x_dst, y_dst, htype = HOMOGRAPHY.RANSAC,
               ransac_threshold = 3.0, iters = 1000, out_type = Dtype.f32):
    """
    Homography estimation.

    Parameters
    ----------
    x_src : af.Array
        A list of x co-ordinates of the source points.
    y_src : af.Array
        A list of y co-ordinates of the source points.
    x_dst : af.Array
        A list of x co-ordinates of the destination points.
    y_dst : af.Array
        A list of y co-ordinates of the destination points.
    htype : optional: af.HOMOGRAPHY. Default: HOMOGRAPHY.RANSAC
        htype can be one of
            - HOMOGRAPHY.RANSAC: RANdom SAmple Consensus will be used to evaluate quality.
            - HOMOGRAPHY.LMEDS : Least MEDian of Squares is used to evaluate quality.
    ransac_threshold : optional: scalar. Default: 3.0
        If `htype` is HOMOGRAPHY.RANSAC, it specifies the L2-distance threshold for inliers.
    iters : optional: integer. Default: 1000
        Maximum number of iterations used by the estimator.
    out_type : optional. af.Dtype. Default: Dtype.f32.
        Specifies the output data type.

    Returns
    -------
    (H, inliers) : A tuple of (af.Array, integer)
    """
    H = Array()
    inliers = c_int_t(0)
    # Bug fixes relative to the previous version:
    #  - pass the underlying handle H.arr (not the Array wrapper H), as every
    #    other wrapper in this module does;
    #  - wrap the scalar arguments with ctypes types (c_float_t / c_uint_t)
    #    so the varargs call marshals them correctly.
    safe_call(backend.get().af_homography(c_pointer(H.arr), c_pointer(inliers),
                                          x_src.arr, y_src.arr, x_dst.arr, y_dst.arr,
                                          htype.value, c_float_t(ransac_threshold),
                                          c_uint_t(iters), out_type.value))
    # Return the inlier count as a plain Python int, matching the documented
    # (af.Array, integer) contract.
    return (H, inliers.value)
|
arrayfire/arrayfire_python
|
arrayfire/vision.py
|
Python
|
bsd-3-clause
| 15,473
|
[
"Gaussian"
] |
671c59ab54f443c52a81f1723f90bca3ad903271e1c708fdd2b8b7b3bed6ea75
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Gary Burton
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Manages the main window and the pluggable views
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from collections import defaultdict
import os
import time
import datetime
from io import StringIO
import gc
import html
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.cli.grampscli import CLIManager
from .user import User
from .plug import tool
from gramps.gen.plug import START
from gramps.gen.plug import REPORT
from gramps.gen.plug.report._constants import standalone_categories
from .plug import (PluginWindows, ReportPluginDialog, ToolPluginDialog)
from .plug.report import report, BookSelector
from .utils import AvailableUpdates
from .pluginmanager import GuiPluginManager
from gramps.gen.relationship import get_relationship_calculator
from .displaystate import DisplayState, RecentDocsMenu
from gramps.gen.const import (HOME_DIR, ICON, URL_BUGTRACKER, URL_HOMEPAGE,
URL_MAILINGLIST, URL_MANUAL_PAGE, URL_WIKISTRING,
WIKI_EXTRAPLUGINS, URL_BUGHOME)
from gramps.gen.constfunc import is_quartz
from gramps.gen.config import config
from gramps.gen.errors import WindowActiveError
from .dialog import ErrorDialog, WarningDialog, QuestionDialog2, InfoDialog
from .widgets import Statusbar
from .undohistory import UndoHistory
from gramps.gen.utils.file import media_path_full
from .dbloader import DbLoader
from .display import display_help, display_url
from .configure import GrampsPreferences
from .aboutdialog import GrampsAboutDialog
from .navigator import Navigator
from .views.tags import Tags
from .uimanager import ActionGroup, valid_action_name
from gramps.gen.lib import (Person, Surname, Family, Media, Note, Place,
Source, Repository, Citation, Event, EventType,
ChildRef)
from gramps.gui.editors import (EditPerson, EditFamily, EditMedia, EditNote,
EditPlace, EditSource, EditRepository,
EditCitation, EditEvent)
from gramps.gen.db.exceptions import DbWriteFailure
from gramps.gen.filters import reload_custom_filters
from .managedwindow import ManagedWindow
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Marker pair (internal key, translated label) for plugin categories that
# cannot be used in this build.
_UNSUPPORTED = ("Unsupported", _("Unsupported"))
# Wiki page names derived from the manual base page; opened from the Help menu.
WIKI_HELP_PAGE_FAQ = '%s_-_FAQ' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_KEY = '%s_-_Keybindings' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_MAN = '%s' % URL_MANUAL_PAGE
# CSS template used to apply the user-selected font to the main '#view' widget.
CSS_FONT = """
#view {
font-family: %s;
}
"""
#-------------------------------------------------------------------------
#
# ViewManager
#
#-------------------------------------------------------------------------
class ViewManager(CLIManager):
"""
**Overview**
The ViewManager is the session manager of the program.
Specifically, it manages the main window of the program. It is closely tied
into the Gtk.UIManager to control all menus and actions.
The ViewManager controls the various Views within the Gramps programs.
Views are organised in categories. The categories can be accessed via
a sidebar. Within a category, the different views are accessible via the
toolbar of view menu.
A View is a particular way of looking at information in the Gramps main
window. Each view is separate from the others, and has no knowledge of
the others.
Examples of current views include:
- Person View
- Relationship View
- Family View
- Source View
The View Manager does not have to know the number of views, the type of
views, or any other details about the views. It simply provides the
method of containing each view, and has methods for creating, deleting and
switching between the views.
"""
    def __init__(self, app, dbstate, view_category_order, user=None):
        """
        The viewmanager is initialised with a dbstate on which Gramps is
        working, and a fixed view_category_order, which is the order in which
        the view categories are accessible in the sidebar.

        :param app: the Gtk.Application owning the main window.
        :param dbstate: shared database state object.
        :param view_category_order: ordered list of view category names.
        :param user: optional User object; when None, a GUI User is created
            after the main window exists so dialogs can be parented to it.
        """
        CLIManager.__init__(self, dbstate, setloader=False, user=user)
        self.view_category_order = view_category_order
        self.app = app
        #set pluginmanager to GUI one
        self._pmgr = GuiPluginManager.get_instance()
        self.merge_ids = []  # UIManager merge ids added for the active page
        self.toolactions = None
        self.tool_menu_ui_id = None
        self.reportactions = None
        self.report_menu_ui_id = None
        self.active_page = None
        self.pages = []  # instantiated view pages, indexed by notebook page
        self.page_lookup = {}  # (cat_num, view_num) -> notebook page number
        self.views = None
        self.current_views = [] # The current view in each category
        self.view_changing = False  # re-entrancy guard for view_changed()
        self.autobackup_time = time.time() # time of start or last autobackup
        self.delay_timer = None # autobackup delay timer for after wakeup
        self.prev_has_changed = 0 # db commit count at autobackup time
        self.show_navigator = config.get('interface.view')
        self.show_toolbar = config.get('interface.toolbar-on')
        self.fullscreen = config.get('interface.fullscreen')
        self.__build_main_window() # sets self.uistate
        if self.user is None:
            # Build the GUI user now that dialogs can be parented to window.
            self.user = User(error=ErrorDialog,
                             parent=self.window,
                             callback=self.uistate.pulse_progressbar,
                             uistate=self.uistate,
                             dbstate=self.dbstate)
        self.__connect_signals()
        self.do_reg_plugins(self.dbstate, self.uistate)
        reload_custom_filters()
        #plugins loaded now set relationship class
        self.rel_class = get_relationship_calculator()
        self.uistate.set_relationship_class()
        # Need to call after plugins have been registered
        self.uistate.connect('update-available', self.process_updates)
        self.check_for_updates()
        # Set autobackup
        self.uistate.connect('autobackup', self.autobackup)
        self.uistate.set_backup_timer()
def check_for_updates(self):
"""
Check for add-on updates.
"""
howoften = config.get("behavior.check-for-addon-updates")
update = False
if howoften != 0: # update never if zero
year, mon, day = list(map(
int, config.get("behavior.last-check-for-addon-updates").split("/")))
days = (datetime.date.today() - datetime.date(year, mon, day)).days
if howoften == 1 and days >= 30: # once a month
update = True
elif howoften == 2 and days >= 7: # once a week
update = True
elif howoften == 3 and days >= 1: # once a day
update = True
elif howoften == 4: # always
update = True
if update:
AvailableUpdates(self.uistate).start()
def process_updates(self, addon_update_list):
"""
Called when add-on updates are available.
"""
rescan = PluginWindows.UpdateAddons(self.uistate, [],
addon_update_list).rescan
self.do_reg_plugins(self.dbstate, self.uistate, rescan=rescan)
def _errordialog(self, title, errormessage):
"""
Show the error.
In the GUI, the error is shown, and a return happens
"""
ErrorDialog(title, errormessage,
parent=self.uistate.window)
return 1
    def __build_main_window(self):
        """
        Builds the GTK interface: main window, navigator sidebar, notebook
        for the views, toolbar and statusbar.  Creates ``self.uistate`` as
        a side effect (other methods rely on that).
        """
        # Restore geometry and font from the saved configuration.
        width = config.get('interface.main-window-width')
        height = config.get('interface.main-window-height')
        horiz_position = config.get('interface.main-window-horiz-position')
        vert_position = config.get('interface.main-window-vert-position')
        font = config.get('utf8.selected-font')
        self.window = Gtk.ApplicationWindow(application=self.app)
        self.app.window = self.window
        self.window.set_icon_from_file(ICON)
        self.window.set_default_size(width, height)
        self.window.move(horiz_position, vert_position)
        self.provider = Gtk.CssProvider()
        self.change_font(font)
        #Set the mnemonic modifier on Macs to alt-ctrl so that it
        #doesn't interfere with the extended keyboard, see
        #https://gramps-project.org/bugs/view.php?id=6943
        if is_quartz():
            self.window.set_mnemonic_modifier(
                Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK)
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.window.add(vbox)
        # Left pane: the navigator sidebar (in an EventBox so it can be
        # shown/hidden as one widget); right pane: the view notebook.
        self.hpane = Gtk.Paned()
        self.ebox = Gtk.EventBox()
        self.navigator = Navigator(self)
        self.ebox.add(self.navigator.get_top())
        self.hpane.pack1(self.ebox, False, False)
        self.hpane.show()
        self.notebook = Gtk.Notebook()
        self.notebook.set_scrollable(True)
        self.notebook.set_show_tabs(False)
        self.notebook.show()
        self.__init_lists()
        self.__build_ui_manager()
        self.hpane.add2(self.notebook)
        toolbar = self.uimanager.get_widget('ToolBar')
        toolbar.show_all()
        self.statusbar = Statusbar()
        self.statusbar.show()
        vbox.pack_end(self.statusbar, False, True, 0)
        vbox.pack_start(toolbar, False, True, 0)
        vbox.pack_end(self.hpane, True, True, 0)
        vbox.show()
        self.uistate = DisplayState(self.window, self.statusbar,
                                    self.uimanager, self)
        # Create history objects
        for nav_type in ('Person', 'Family', 'Event', 'Place', 'Source',
                         'Citation', 'Repository', 'Note', 'Media'):
            self.uistate.register(self.dbstate, nav_type, 0)
        self.dbstate.connect('database-changed', self.uistate.db_changed)
        self.tags = Tags(self.uistate, self.dbstate)
        # handle OPEN Recent Menu, insert it into the toolbar.
        self.recent_manager = RecentDocsMenu(
            self.uistate, self.dbstate, self._read_recent_file)
        self.recent_manager.build(update_menu=False)
        self.db_loader = DbLoader(self.dbstate, self.uistate)
        self.__setup_navigator()
        # need to get toolbar again, because it is a new object now.
        toolbar = self.uimanager.get_widget('ToolBar')
        if self.show_toolbar:
            toolbar.show()
        else:
            toolbar.hide()
        if self.fullscreen:
            self.window.fullscreen()
        self.window.set_title("%s - Gramps" % _('No Family Tree'))
        self.window.show()
def __setup_navigator(self):
"""
If we have enabled te sidebar, show it, and turn off the tabs. If
disabled, hide the sidebar and turn on the tabs.
"""
if self.show_navigator:
self.ebox.show()
else:
self.ebox.hide()
    def __connect_signals(self):
        """
        Connects the signals needed: window close request and notebook
        page switches.
        """
        # Keep the handler id so quit() can disconnect it to avoid re-entry.
        self.del_event = self.window.connect('delete-event', self.quit)
        self.notebook.connect('switch-page', self.view_changed)
    def __init_lists(self):
        """
        Initialize the actions lists for the UIManager.

        Each entry is a tuple: (action name, callback[, accelerator
        [, initial toggle state]]).  Commented-out entries are kept as a
        record of menu structure handled elsewhere.
        """
        # Application-level actions (attached to the Gtk.Application).
        self._app_actionlist = [
            ('quit', self.quit, None if is_quartz() else "<PRIMARY>q"),
            ('preferences', self.preferences_activate),
            ('about', self.display_about_box), ]
        # Actions always available once the window exists.
        self._file_action_list = [
            #('FileMenu', None, _('_Family Trees')),
            ('Open', self.__open_activate, "<PRIMARY>o"),
            #('OpenRecent'_("Open an existing database")),
            #('quit', self.quit, "<PRIMARY>q"),
            #('ViewMenu', None, _('_View')),
            ('Navigator', self.navigator_toggle, "<PRIMARY>m",
             self.show_navigator),
            ('Toolbar', self.toolbar_toggle, '', self.show_toolbar),
            ('Fullscreen', self.fullscreen_toggle, "F11", self.fullscreen),
            #('EditMenu', None, _('_Edit')),
            #('preferences', self.preferences_activate),
            #('HelpMenu', None, _('_Help')),
            ('HomePage', home_page_activate),
            ('MailingLists', mailing_lists_activate),
            ('ReportBug', report_bug_activate),
            ('ExtraPlugins', extra_plugins_activate),
            #('about', self.display_about_box),
            ('PluginStatus', self.__plugin_status),
            ('FAQ', faq_activate),
            ('KeyBindings', key_bindings),
            ('UserManual', manual_activate, 'F1'),
            ('TipOfDay', self.tip_of_day_activate), ]
        # Actions valid with any open database, including read-only ones.
        self._readonly_action_list = [
            ('Close', self.close_database, "<control>w"),
            ('Export', self.export_data, "<PRIMARY>e"),
            ('Backup', self.quick_backup),
            ('Abandon', self.abort),
            ('Reports', self.reports_clicked),
            #('GoMenu', None, _('_Go')),
            #('ReportsMenu', None, _('_Reports')),
            ('Books', self.run_book),
            #('WindowsMenu', None, _('_Windows')),
            #('F2', self.__keypress, 'F2'), #pedigreeview
            #('F3', self.__keypress, 'F3'), # timelinepedigreeview
            #('F4', self.__keypress, 'F4'), # timelinepedigreeview
            #('F5', self.__keypress, 'F5'), # timelinepedigreeview
            #('F6', self.__keypress, 'F6'), # timelinepedigreeview
            #('F7', self.__keypress, 'F7'),
            #('F8', self.__keypress, 'F8'),
            #('F9', self.__keypress, 'F9'),
            #('F11', self.__keypress, 'F11'), # used to go full screen
            #('F12', self.__keypress, 'F12'),
            #('<PRIMARY>BackSpace', self.__keypress, '<PRIMARY>BackSpace'),
            #('<PRIMARY>Delete', self.__keypress, '<PRIMARY>Delete'),
            #('<PRIMARY>Insert', self.__keypress, '<PRIMARY>Insert'),
            #('<PRIMARY>J', self.__keypress, '<PRIMARY>J'),
            # <ctrl>+digit jumps straight to that view category.
            ('PRIMARY-1', self.__gocat, '<PRIMARY>1'),
            ('PRIMARY-2', self.__gocat, '<PRIMARY>2'),
            ('PRIMARY-3', self.__gocat, '<PRIMARY>3'),
            ('PRIMARY-4', self.__gocat, '<PRIMARY>4'),
            ('PRIMARY-5', self.__gocat, '<PRIMARY>5'),
            ('PRIMARY-6', self.__gocat, '<PRIMARY>6'),
            ('PRIMARY-7', self.__gocat, '<PRIMARY>7'),
            ('PRIMARY-8', self.__gocat, '<PRIMARY>8'),
            ('PRIMARY-9', self.__gocat, '<PRIMARY>9'),
            ('PRIMARY-0', self.__gocat, '<PRIMARY>0'),
            # NOTE: CTRL+ALT+NUMBER is set in gramps.gui.navigator
            ('PRIMARY-N', self.__next_view, '<PRIMARY>N'),
            # the following conflicts with PrintView!!!
            ('PRIMARY-P', self.__prev_view, '<PRIMARY>P'), ]
        # Actions that modify data; only valid on writable databases.
        self._action_action_list = [
            ('Clipboard', self.clipboard, "<PRIMARY>b"),
            #('AddMenu', None, _('_Add')),
            #('AddNewMenu', None, _('New')),
            ('PersonAdd', self.add_new_person, "<shift><Alt>p"),
            ('FamilyAdd', self.add_new_family, "<shift><Alt>f"),
            ('EventAdd', self.add_new_event, "<shift><Alt>e"),
            ('PlaceAdd', self.add_new_place, "<shift><Alt>l"),
            ('SourceAdd', self.add_new_source, "<shift><Alt>s"),
            ('CitationAdd', self.add_new_citation, "<shift><Alt>c"),
            ('RepositoryAdd', self.add_new_repository, "<shift><Alt>r"),
            ('MediaAdd', self.add_new_media, "<shift><Alt>m"),
            ('NoteAdd', self.add_new_note, "<shift><Alt>n"),
            ('UndoHistory', self.undo_history, "<PRIMARY>H"),
            #--------------------------------------
            ('Import', self.import_data, "<PRIMARY>i"),
            ('Tools', self.tools_clicked),
            #('BookMenu', None, _('_Bookmarks')),
            #('ToolsMenu', None, _('_Tools')),
            ('ConfigView', self.config_view, '<shift><PRIMARY>c'), ]
        # Undo/redo get their own groups so sensitivity can track the
        # undo stack independently of everything else.
        self._undo_action_list = [
            ('Undo', self.undo, '<PRIMARY>z'), ]
        self._redo_action_list = [
            ('Redo', self.redo, '<shift><PRIMARY>z'), ]
def run_book(self, *action):
"""
Run a book.
"""
try:
BookSelector(self.dbstate, self.uistate)
except WindowActiveError:
return
def __gocat(self, action, value):
"""
Callback that is called on ctrl+number press. It moves to the
requested category like __next_view/__prev_view. 0 is 10
"""
cat = int(action.get_name()[-1])
if cat == 0:
cat = 10
cat -= 1
if cat >= len(self.current_views):
#this view is not present
return False
self.goto_page(cat, None)
def __next_view(self, action, value):
"""
Callback that is called when the next category action is selected. It
selects the next category as the active category. If we reach the end,
we wrap around to the first.
"""
curpage = self.notebook.get_current_page()
#find cat and view of the current page
for key in self.page_lookup:
if self.page_lookup[key] == curpage:
cat_num, view_num = key
break
#now go to next category
if cat_num >= len(self.current_views)-1:
self.goto_page(0, None)
else:
self.goto_page(cat_num+1, None)
def __prev_view(self, action, value):
"""
Callback that is called when the previous category action is selected.
It selects the previous category as the active category. If we reach
the beginning of the list, we wrap around to the last.
"""
curpage = self.notebook.get_current_page()
#find cat and view of the current page
for key in self.page_lookup:
if self.page_lookup[key] == curpage:
cat_num, view_num = key
break
#now go to next category
if cat_num > 0:
self.goto_page(cat_num-1, None)
else:
self.goto_page(len(self.current_views)-1, None)
    def init_interface(self):
        """
        Initialize the interface: discover available views, restore the
        last-used view, and build the Tools and Reports menus.
        """
        self.views = self.get_available_views()
        # defaults is (category, view, per-category view list).
        defaults = views_to_show(self.views,
                                 config.get('preferences.use-last-view'))
        self.current_views = defaults[2]
        self.navigator.load_plugins(self.dbstate, self.uistate)
        self.goto_page(defaults[0], defaults[1])
        # Disable file actions while the menus are rebuilt.
        self.uimanager.set_actions_sensitive(self.fileactions, False)
        self.__build_tools_menu(self._pmgr.get_reg_tools())
        self.__build_report_menu(self._pmgr.get_reg_reports())
        self._pmgr.connect('plugins-reloaded',
                           self.__rebuild_report_and_tool_menus)
        self.uimanager.set_actions_sensitive(self.fileactions, True)
        if not self.file_loaded:
            # No database open yet: hide everything that needs one.
            self.uimanager.set_actions_visible(self.actiongroup, False)
            self.uimanager.set_actions_visible(self.readonlygroup, False)
            self.uimanager.set_actions_visible(self.undoactions, False)
            self.uimanager.set_actions_visible(self.redoactions, False)
        self.uimanager.update_menu()
        config.connect("interface.statusbar", self.__statusbar_key_update)
    def __statusbar_key_update(self, client, cnxn_id, entry, data):
        """
        Callback for changes of the 'interface.statusbar' config key:
        refresh the statusbar content.  The parameters follow the config
        callback signature and are unused here.
        """
        self.uistate.modify_statusbar(self.dbstate)
def post_init_interface(self, show_manager=True):
"""
Showing the main window is deferred so that
ArgHandler can work without it always shown
"""
self.window.show()
if not self.dbstate.is_open() and show_manager:
self.__open_activate(None, None)
def do_reg_plugins(self, dbstate, uistate, rescan=False):
"""
Register the plugins at initialization time. The plugin status window
is opened on an error if the user has requested.
"""
# registering plugins
self.uistate.status_text(_('Registering plugins...'))
error = CLIManager.do_reg_plugins(self, dbstate, uistate,
rescan=rescan)
# get to see if we need to open the plugin status window
if error and config.get('behavior.pop-plugin-status'):
self.__plugin_status()
self.uistate.push_message(self.dbstate, _('Ready'))
    def close_database(self, action=None, make_backup=True):
        """
        Close the database and update the GUI.

        :param action: menu action that triggered the close (unused).
        :param make_backup: accepted for API compatibility.
            # NOTE(review): this parameter is not used in the body — confirm
            # whether backup-on-close is handled elsewhere.
        """
        self.dbstate.no_database()
        self.post_close_db()
    def no_del_event(self, *obj):
        """ Routine to prevent window destroy with default handler if user
        hits 'x' multiple times.  Returning True stops further handling of
        the delete-event. """
        return True
    def quit(self, *obj):
        """
        Closes out the program, backing up data.

        Ordering matters: the UI is frozen and the delete-event handler
        swapped out first so repeated 'x' clicks cannot re-enter this
        method, then data is backed up and the database closed before the
        window geometry is saved.
        """
        # mark interface insenstitive to prevent unexpected events
        self.uistate.set_sensitive(False)
        # the following prevents reentering quit if user hits 'x' again
        self.window.disconnect(self.del_event)
        # the following prevents premature closing of main window if user
        # hits 'x' multiple times.
        self.window.connect('delete-event', self.no_del_event)
        # backup data
        if config.get('database.backup-on-exit'):
            self.autobackup()
        # close the database
        if self.dbstate.is_open():
            self.dbstate.db.close(user=self.user)
        # have each page save anything, if they need to:
        self.__delete_pages()
        # save the current window size
        (width, height) = self.window.get_size()
        config.set('interface.main-window-width', width)
        config.set('interface.main-window-height', height)
        # save the current window position
        (horiz_position, vert_position) = self.window.get_position()
        config.set('interface.main-window-horiz-position', horiz_position)
        config.set('interface.main-window-vert-position', vert_position)
        config.save()
        self.app.quit()
def abort(self, *obj):
"""
Abandon changes and quit.
"""
if self.dbstate.db.abort_possible:
dialog = QuestionDialog2(
_("Abort changes?"),
_("Aborting changes will return the database to the state "
"it was before you started this editing session."),
_("Abort changes"),
_("Cancel"),
parent=self.uistate.window)
if dialog.run():
self.dbstate.db.disable_signals()
while self.dbstate.db.undo():
pass
self.quit()
else:
WarningDialog(
_("Cannot abandon session's changes"),
_('Changes cannot be completely abandoned because the '
'number of changes made in the session exceeded the '
'limit.'), parent=self.uistate.window)
def __init_action_group(self, name, actions, sensitive=True, toggles=None):
"""
Initialize an action group for the UIManager
"""
new_group = ActionGroup(name, actions)
self.uimanager.insert_action_group(new_group)
self.uimanager.set_actions_sensitive(new_group, sensitive)
return new_group
    def __build_ui_manager(self):
        """
        Builds the action groups: writable-db actions, read-only actions,
        always-on file/window actions, undo/redo (initially insensitive)
        and application-level actions.
        """
        self.uimanager = self.app.uimanager
        self.actiongroup = self.__init_action_group(
            'RW', self._action_action_list)
        self.readonlygroup = self.__init_action_group(
            'RO', self._readonly_action_list)
        self.fileactions = self.__init_action_group(
            'FileWindow', self._file_action_list)
        # Undo/redo start insensitive; toggled by the undo stack callbacks.
        self.undoactions = self.__init_action_group(
            'Undo', self._undo_action_list, sensitive=False)
        self.redoactions = self.__init_action_group(
            'Redo', self._redo_action_list, sensitive=False)
        self.appactions = ActionGroup('AppActions', self._app_actionlist, 'app')
        self.uimanager.insert_action_group(self.appactions, gio_group=self.app)
def preferences_activate(self, *obj):
"""
Open the preferences dialog.
"""
try:
GrampsPreferences(self.uistate, self.dbstate)
except WindowActiveError:
return
    def reset_font(self):
        """
        Reset to the default application font by removing our CSS
        provider from the screen.
        """
        Gtk.StyleContext.remove_provider_for_screen(self.window.get_screen(),
                                                    self.provider)
def change_font(self, font):
"""
Change the default application font.
Only in the case we use symbols.
"""
if config.get('utf8.in-use') and font != "":
css_font = CSS_FONT % font
try:
self.provider.load_from_data(css_font.encode('UTF-8'))
Gtk.StyleContext.add_provider_for_screen(
self.window.get_screen(), self.provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
return True
except:
# Force gramps to use the standard font.
print("I can't set the new font :", font)
config.set('utf8.in-use', False)
config.set('utf8.selected-font', "")
return False
    def tip_of_day_activate(self, *obj):
        """
        Display Tip of the day.  The import is local to avoid loading the
        dialog module at startup.
        """
        from .tipofday import TipOfDay
        TipOfDay(self.uistate)
    def __plugin_status(self, obj=None, data=None):
        """
        Display plugin status dialog; ignore the request if the dialog is
        already open.
        """
        try:
            PluginWindows.PluginStatus(self.dbstate, self.uistate, [])
        except WindowActiveError:
            pass
def navigator_toggle(self, action, value):
"""
Set the sidebar based on the value of the toggle button. Save the
results in the configuration settings
"""
action.set_state(value)
if value.get_boolean():
self.ebox.show()
config.set('interface.view', True)
self.show_navigator = True
else:
self.ebox.hide()
config.set('interface.view', False)
self.show_navigator = False
config.save()
def toolbar_toggle(self, action, value):
"""
Set the toolbar based on the value of the toggle button. Save the
results in the configuration settings
"""
action.set_state(value)
toolbar = self.uimanager.get_widget('ToolBar')
if value.get_boolean():
toolbar.show_all()
config.set('interface.toolbar-on', True)
else:
toolbar.hide()
config.set('interface.toolbar-on', False)
config.save()
def fullscreen_toggle(self, action, value):
"""
Set the main Gramps window fullscreen based on the value of the
toggle button. Save the setting in the config file.
"""
action.set_state(value)
if value.get_boolean():
self.window.fullscreen()
config.set('interface.fullscreen', True)
else:
self.window.unfullscreen()
config.set('interface.fullscreen', False)
config.save()
    def get_views(self):
        """
        Return the view definitions (list per category of (pdata,
        view class) pairs, as built by init_interface).
        """
        return self.views
def goto_page(self, cat_num, view_num):
"""
Create the page if it doesn't exist and make it the current page.
"""
if view_num is None:
view_num = self.current_views[cat_num]
else:
self.current_views[cat_num] = view_num
page_num = self.page_lookup.get((cat_num, view_num))
if page_num is None:
page_def = self.views[cat_num][view_num]
page_num = self.notebook.get_n_pages()
self.page_lookup[(cat_num, view_num)] = page_num
self.__create_page(page_def[0], page_def[1])
self.notebook.set_current_page(page_num)
return self.pages[page_num]
def get_category(self, cat_name):
"""
Return the category number from the given category name.
"""
for cat_num, cat_views in enumerate(self.views):
if cat_name == cat_views[0][0].category[1]:
return cat_num
return None
    def __create_dummy_page(self, pdata, error):
        """ Create a dummy page shown in place of a view that failed to
        load; *error* is the formatted traceback to display. """
        from .views.pageview import DummyPage
        return DummyPage(pdata.name, pdata, self.dbstate, self.uistate,
                         _("View failed to load. Check error output."), error)
def __create_page(self, pdata, page_def):
"""
Create a new page and set it as the current page.
"""
try:
page = page_def(pdata, self.dbstate, self.uistate)
except:
import traceback
LOG.warning("View '%s' failed to load.", pdata.id)
traceback.print_exc()
page = self.__create_dummy_page(pdata, traceback.format_exc())
try:
page_display = page.get_display()
except:
import traceback
print("ERROR: '%s' failed to create view" % pdata.name)
traceback.print_exc()
page = self.__create_dummy_page(pdata, traceback.format_exc())
page_display = page.get_display()
page.define_actions()
page.post()
self.pages.append(page)
# create icon/label for notebook tab (useful for debugging)
hbox = Gtk.Box()
image = Gtk.Image()
image.set_from_icon_name(page.get_stock(), Gtk.IconSize.MENU)
hbox.pack_start(image, False, True, 0)
hbox.add(Gtk.Label(label=pdata.name))
hbox.show_all()
page_num = self.notebook.append_page(page.get_display(), hbox)
self.active_page.post_create()
if not self.file_loaded:
self.uimanager.set_actions_visible(self.actiongroup, False)
self.uimanager.set_actions_visible(self.readonlygroup, False)
self.uimanager.set_actions_visible(self.undoactions, False)
self.uimanager.set_actions_visible(self.redoactions, False)
return page
    def view_changed(self, notebook, page, page_num):
        """
        Called when the notebook page is changed.

        Persists the new view as the last-used one and routes the change
        through the navigator and __change_page.  Guarded by
        self.view_changing against re-entry while the switch is handled.
        """
        if self.view_changing:
            return
        self.view_changing = True
        # Recover the (category, view) pair of the new notebook page.
        cat_num = view_num = None
        for key in self.page_lookup:
            if self.page_lookup[key] == page_num:
                cat_num, view_num = key
                break
        # Save last view in configuration
        view_id = self.views[cat_num][view_num][0].id
        config.set('preferences.last-view', view_id)
        last_views = config.get('preferences.last-views')
        if len(last_views) != len(self.views):
            # If the number of categories has changed then reset the defaults
            last_views = [''] * len(self.views)
        last_views[cat_num] = view_id
        config.set('preferences.last-views', last_views)
        config.save()
        self.navigator.view_changed(cat_num, view_num)
        self.__change_page(page_num)
        self.view_changing = False
    def __change_page(self, page_num):
        """
        Perform necessary actions when a page is changed: disconnect the
        previous page, connect the new one, then finish the menu update
        from an idle callback (see bug note below).
        """
        self.__disconnect_previous_page()
        self.active_page = self.pages[page_num]
        self.__connect_active_page(page_num)
        self.active_page.set_active()
        # Flush pending Gtk events before continuing.
        while Gtk.events_pending():
            Gtk.main_iteration()
        # bug 12048 this avoids crash if part of toolbar in view is not shown
        # because of a small screen when changing views. Part of the Gtk code
        # was deleting a toolbar object too soon; and another part of Gtk still
        # had a reference.
        def page_changer(self):
            # Runs once from the idle loop; returning False removes it.
            self.uimanager.update_menu()
            self.active_page.change_page()
            return False
        GLib.idle_add(page_changer, self,
                      priority=GLib.PRIORITY_DEFAULT_IDLE - 10)
def __delete_pages(self):
"""
Calls on_delete() for each view
"""
for page in self.pages:
page.on_delete()
def __disconnect_previous_page(self):
"""
Disconnects the previous page, removing the old action groups
and removes the old UI components.
"""
list(map(self.uimanager.remove_ui, self.merge_ids))
if self.active_page is not None:
self.active_page.set_inactive()
groups = self.active_page.get_actions()
for grp in groups:
if grp in self.uimanager.get_action_groups():
self.uimanager.remove_action_group(grp)
self.active_page = None
def __connect_active_page(self, page_num):
"""
Inserts the action groups associated with the current page
into the UIManager
"""
for grp in self.active_page.get_actions():
self.uimanager.insert_action_group(grp)
uidef = self.active_page.ui_definition()
self.merge_ids = [self.uimanager.add_ui_from_string(uidef)]
for uidef in self.active_page.additional_ui_definitions():
mergeid = self.uimanager.add_ui_from_string(uidef)
self.merge_ids.append(mergeid)
configaction = self.uimanager.get_action(self.actiongroup,
'ConfigView')
if self.active_page.can_configure():
configaction.set_enabled(True)
else:
configaction.set_enabled(False)
def import_data(self, *obj):
"""
Imports a file
"""
if self.dbstate.is_open():
self.db_loader.import_file()
infotxt = self.db_loader.import_info_text()
if infotxt:
InfoDialog(_('Import Statistics'), infotxt,
parent=self.window)
self.__post_load()
    def __open_activate(self, obj, value):
        """
        Called when the Open button is clicked, opens the DbManager.

        When the dialog returns a (filename, title) pair the selected tree
        is loaded; otherwise, if the manager renamed/changed a tree, the
        main window title is rewritten to reflect that change.
        """
        from .dbman import DbManager
        dialog = DbManager(self.uistate, self.dbstate, self, self.window)
        value = dialog.run()
        if value:
            # Close the currently open tree before loading the new one.
            if self.dbstate.is_open():
                self.dbstate.db.close(user=self.user)
            (filename, title) = value
            self.db_loader.read_file(filename)
            self._post_load_newdb(filename, 'x-directory/normal', title)
        else:
            if dialog.after_change != "":
                # We change the title of the main window.
                old_title = self.uistate.window.get_title()
                if old_title:
                    # Split "name - Gramps" at the separator.
                    delim = old_title.find(' - ')
                    tit1 = old_title[:delim]
                    tit2 = old_title[delim:]
                    new_title = dialog.after_change
                    if '<=' in tit2:
                        ## delim2 = tit2.find('<=') + 3
                        ## tit3 = tit2[delim2:-1]
                        new_title += tit2.replace(']', '') + ' => ' + tit1 + ']'
                    else:
                        new_title += tit2 + ' <= [' + tit1 + ']'
                    self.uistate.window.set_title(new_title)
    def __post_load(self):
        """
        This method is for the common UI post_load, both new files
        and added data like imports: wires the undo/redo label and
        history callbacks into the freshly (re)loaded database.
        """
        self.dbstate.db.undo_callback = self.__change_undo_label
        self.dbstate.db.redo_callback = self.__change_redo_label
        # Initialise both labels to their disabled state without a full
        # menu rebuild.
        self.__change_undo_label(None, update_menu=False)
        self.__change_redo_label(None, update_menu=False)
        self.dbstate.db.undo_history_callback = self.undo_history_update
        self.undo_history_close()
    def _post_load_newdb(self, filename, filetype, title=None):
        """
        The method called after load of a new database.
        Inherit CLI method to add GUI part: the non-GUI post-load runs
        first (only when the database opened), then the GUI update.
        """
        if self.dbstate.db.is_open():
            self._post_load_newdb_nongui(filename, title)
        self._post_load_newdb_gui(filename, filetype, title)
    def _post_load_newdb_gui(self, filename, filetype, title=None):
        """
        Called after a new database is loaded to do GUI stuff: update the
        window title, offer the Check & Repair tool after a crash, and
        adjust action visibility for read-only / read-write trees.
        """
        # GUI related post load db stuff
        # Update window title
        if filename[-1] == os.path.sep:
            filename = filename[:-1]
        name = os.path.basename(filename)
        if title:
            name = title
        isopen = self.dbstate.is_open()
        if not isopen:
            rw = False
            msg = "Gramps"
        else:
            rw = not self.dbstate.db.readonly
            if rw:
                msg = "%s - Gramps" % name
            else:
                msg = "%s (%s) - Gramps" % (name, _('Read Only'))
        self.uistate.window.set_title(msg)
        # 'behavior.runcheck' is set when the previous session crashed;
        # offer to run the Check and Repair tool once, then clear the flag.
        if(bool(config.get('behavior.runcheck')) and QuestionDialog2(
                _("Gramps had a problem the last time it was run."),
                _("Would you like to run the Check and Repair tool?"),
                _("Yes"), _("No"), parent=self.uistate.window).run()):
            pdata = self._pmgr.get_plugin('check')
            mod = self._pmgr.load_plugin(pdata)
            tool.gui_tool(dbstate=self.dbstate, user=self.user,
                          tool_class=getattr(mod, pdata.toolclass),
                          options_class=getattr(mod, pdata.optionclass),
                          translated_name=pdata.name,
                          name=pdata.id,
                          category=pdata.category,
                          callback=self.dbstate.db.request_rebuild)
        config.set('behavior.runcheck', False)
        self.__change_page(self.notebook.get_current_page())
        # Writable trees get the full action set; read-only trees only the
        # read-only group.
        self.uimanager.set_actions_visible(self.actiongroup, rw)
        self.uimanager.set_actions_visible(self.readonlygroup, isopen)
        self.uimanager.set_actions_visible(self.undoactions, rw)
        self.uimanager.set_actions_visible(self.redoactions, rw)
        self.recent_manager.build()
        # Call common __post_load method for GUI update after a change
        self.__post_load()
    def post_close_db(self):
        """
        Called after a database is closed to do GUI stuff: reset the
        window title, hide db-dependent actions and clear the recent-file
        pointer.
        """
        self.undo_history_close()
        self.uistate.window.set_title("%s - Gramps" % _('No Family Tree'))
        self.uistate.clear_filter_results()
        self.__disconnect_previous_page()
        self.uimanager.set_actions_visible(self.actiongroup, False)
        self.uimanager.set_actions_visible(self.readonlygroup, False)
        self.uimanager.set_actions_visible(self.undoactions, False)
        self.uimanager.set_actions_visible(self.redoactions, False)
        self.uimanager.update_menu()
        config.set('paths.recent-file', '')
        config.save()
def __change_undo_label(self, label, update_menu=True):
"""
Change the UNDO label
"""
_menu = '''<placeholder id="undo">
<item>
<attribute name="action">win.Undo</attribute>
<attribute name="label">%s</attribute>
</item>
</placeholder>
'''
if not label:
label = _('_Undo')
self.uimanager.set_actions_sensitive(self.undoactions, False)
else:
self.uimanager.set_actions_sensitive(self.undoactions, True)
self.uimanager.add_ui_from_string([_menu % html.escape(label)])
if update_menu:
self.uimanager.update_menu()
def __change_redo_label(self, label, update_menu=True):
"""
Change the REDO label
"""
_menu = '''<placeholder id="redo">
<item>
<attribute name="action">win.Redo</attribute>
<attribute name="label">%s</attribute>
</item>
</placeholder>
'''
if not label:
label = _('_Redo')
self.uimanager.set_actions_sensitive(self.redoactions, False)
else:
self.uimanager.set_actions_sensitive(self.redoactions, True)
self.uimanager.add_ui_from_string([_menu % html.escape(label)])
if update_menu:
self.uimanager.update_menu()
    def undo_history_update(self):
        """
        This function is called to update both the state of
        the Undo History menu item (enable/disable) and
        the contents of the Undo History window.

        The AttributeError guard also covers the case where the window
        attribute has never been created (EAFP).
        """
        try:
            # Try updating undo history window if it exists
            self.undo_history_window.update()
        except AttributeError:
            # Let it go: history window does not exist
            return
    def undo_history_close(self):
        """
        Closes the undo history window if it exists and is open; silently
        does nothing otherwise (EAFP on the window attribute).
        """
        try:
            # Try closing undo history window if it exists
            if self.undo_history_window.opened:
                self.undo_history_window.close()
        except AttributeError:
            # Let it go: history window does not exist
            return
def quick_backup(self, *obj):
"""
Make a quick XML back with or without media.
"""
try:
QuickBackup(self.dbstate, self.uistate, self.user)
except WindowActiveError:
return
def autobackup(self):
    """
    Backup the current family tree.

    Runs periodically from a GLib timer.  If the process was apparently
    suspended (sleep/hibernate), re-arm a 5-minute delay timer and skip
    this run; otherwise back up only when the database has new commits
    since the previous run.
    """
    if self.delay_timer is not None:
        GLib.source_remove(self.delay_timer)
        self.delay_timer = None
    interval = config.get('database.autobackup')
    # Config enum -> backup period in seconds.  An unexpected value now
    # maps to 0. instead of leaving ``seconds`` unassigned, which raised
    # NameError on the comparison below.
    seconds = {1: 900.,     # 15min *60
               2: 1800.,    # 30min *60
               3: 3600.,    # 60min *60
               4: 43200.,   # (12 hours) 720min *60
               5: 86400.,   # (24 hours) 1440min *60
               }.get(interval, 0.)
    now = time.time()
    if interval and now > self.autobackup_time + seconds + 300.:
        # we have been delayed by more than 5 minutes
        # so we have probably been awakened from sleep/hibernate
        # we should delay a bit more to let the system settle
        self.delay_timer = GLib.timeout_add_seconds(300, self.autobackup)
        self.autobackup_time = now
        return
    self.autobackup_time = now
    # Only backup if more commits since last time
    if (self.dbstate.db.is_open() and
            self.dbstate.db.has_changed > self.prev_has_changed):
        self.prev_has_changed = self.dbstate.db.has_changed
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.uistate.push_message(self.dbstate, _("Autobackup..."))
        try:
            self.__backup()
        except DbWriteFailure as msg:
            self.uistate.push_message(self.dbstate,
                                      _("Error saving backup data"))
        self.uistate.set_busy_cursor(False)
        self.uistate.progress.hide()
def __backup(self):
    """
    Backup database to a Gramps XML file.
    """
    from gramps.plugins.export.exportxml import XmlWriter
    out_dir = config.get('database.backup-path')
    use_compression = config.get('database.compress-backup')
    writer = XmlWriter(self.dbstate.db, self.user, strip_photos=0,
                       compress=use_compression)
    # File name: "<dbname>-YYYY-MM-DD-HH-MM-SS.gramps"
    stamp = '{0:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
    basename = "%s-%s.gramps" % (self.dbstate.db.get_dbname(), stamp)
    writer.write(os.path.join(out_dir, basename))
def reports_clicked(self, *obj):
    """
    Displays the Reports dialog
    """
    try:
        ReportPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        # The dialog is already open; nothing to do.
        return
def tools_clicked(self, *obj):
    """
    Displays the Tools dialog
    """
    try:
        ToolPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        # The dialog is already open; nothing to do.
        return
def clipboard(self, *obj):
    """
    Displays the Clipboard
    """
    # Local import: the module is only needed when the window is opened.
    from .clipboard import ClipboardWindow
    try:
        ClipboardWindow(self.dbstate, self.uistate)
    except WindowActiveError:
        # The clipboard window is already open; nothing to do.
        return
# ---------------Add new xxx --------------------------------
def add_new_person(self, *obj):
    """
    Add a new person to the database. (Global keybinding)
    """
    person = Person()
    # The person editor requires at least one (empty) surname entry,
    # and it must be flagged as the primary one.
    person.primary_name.add_surname(Surname())
    person.primary_name.set_primary_surname(0)
    try:
        EditPerson(self.dbstate, self.uistate, [], person)
    except WindowActiveError:
        # An editor for this person is already open.
        pass
def add_new_family(self, *obj):
    """
    Add a new family to the database. (Global keybinding)
    """
    try:
        # Open the editor on a freshly created, empty family.
        EditFamily(self.dbstate, self.uistate, [], Family())
    except WindowActiveError:
        pass
def add_new_event(self, *obj):
    """
    Add a new custom/unknown event (Note you type first letter of event)
    """
    # Start from an event with an UNKNOWN type; the editor lets the user
    # refine it.
    event = Event()
    event.set_type(EventType.UNKNOWN)
    try:
        EditEvent(self.dbstate, self.uistate, [], event)
    except WindowActiveError:
        pass
def add_new_place(self, *obj):
    """Add a new place to the place list"""
    try:
        EditPlace(self.dbstate, self.uistate, [], Place())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
def add_new_source(self, *obj):
    """Add a new source to the source list"""
    try:
        EditSource(self.dbstate, self.uistate, [], Source())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
def add_new_repository(self, *obj):
    """Add a new repository to the repository list"""
    try:
        EditRepository(self.dbstate, self.uistate, [], Repository())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
def add_new_citation(self, *obj):
    """
    Add a new citation
    """
    try:
        EditCitation(self.dbstate, self.uistate, [], Citation())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
def add_new_media(self, *obj):
    """Add a new media object to the media list"""
    try:
        EditMedia(self.dbstate, self.uistate, [], Media())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
def add_new_note(self, *obj):
    """Add a new note to the note list"""
    try:
        EditNote(self.dbstate, self.uistate, [], Note())
    except WindowActiveError:
        # An editor for this object is already open.
        pass
# ------------------------------------------------------------------------
def config_view(self, *obj):
    """
    Displays the configuration dialog for the active view
    """
    # Delegate to whichever page/view is currently active.
    self.active_page.configure()
def undo(self, *obj):
    """
    Calls the undo function on the database
    """
    self.uistate.set_busy_cursor(True)
    try:
        self.dbstate.db.undo()
    finally:
        # Restore the cursor even if the undo raises, so the UI is not
        # left stuck with a busy pointer.
        self.uistate.set_busy_cursor(False)
def redo(self, *obj):
    """
    Calls the redo function on the database
    """
    self.uistate.set_busy_cursor(True)
    try:
        self.dbstate.db.redo()
    finally:
        # Restore the cursor even if the redo raises, so the UI is not
        # left stuck with a busy pointer.
        self.uistate.set_busy_cursor(False)
def undo_history(self, *obj):
    """
    Displays the Undo history window
    """
    try:
        # Keep a reference so undo_history_update()/undo_history_close()
        # can reach the window later.
        self.undo_history_window = UndoHistory(self.dbstate, self.uistate)
    except WindowActiveError:
        return
def export_data(self, *obj):
    """
    Calls the ExportAssistant to export data
    """
    # Nothing to export without an open tree.
    if not self.dbstate.is_open():
        return
    from .plug.export import ExportAssistant
    try:
        ExportAssistant(self.dbstate, self.uistate)
    except WindowActiveError:
        pass
def __rebuild_report_and_tool_menus(self):
    """
    Callback that rebuilds the tools and reports menu
    """
    self.__build_tools_menu(self._pmgr.get_reg_tools())
    self.__build_report_menu(self._pmgr.get_reg_reports())
    # NOTE(review): presumably refreshes the relationship calculator after
    # a plugin (re)registration -- confirm against uistate implementation.
    self.uistate.set_relationship_class()
def __build_tools_menu(self, tool_menu_list):
    """
    Builds a new tools menu
    """
    # Tear down the previously registered tool actions and UI first.
    if self.toolactions:
        self.uistate.uimanager.remove_action_group(self.toolactions)
        self.uistate.uimanager.remove_ui(self.tool_menu_ui_id)
    self.toolactions = ActionGroup(name='ToolWindow')
    uidef, actions = self.build_plugin_menu(
        'ToolsMenu', tool_menu_list, tool.tool_categories,
        make_plugin_callback)
    self.toolactions.add_actions(actions)
    self.tool_menu_ui_id = self.uistate.uimanager.add_ui_from_string(uidef)
    self.uimanager.insert_action_group(self.toolactions)
def __build_report_menu(self, report_menu_list):
    """
    Builds a new reports menu
    """
    # Tear down the previously registered report actions and UI first.
    if self.reportactions:
        self.uistate.uimanager.remove_action_group(self.reportactions)
        self.uistate.uimanager.remove_ui(self.report_menu_ui_id)
    self.reportactions = ActionGroup(name='ReportWindow')
    uidef, actions = self.build_plugin_menu(
        'ReportsMenu', report_menu_list, standalone_categories,
        make_plugin_callback)
    self.reportactions.add_actions(actions)
    self.report_menu_ui_id = self.uistate.uimanager.add_ui_from_string(uidef)
    self.uimanager.insert_action_group(self.reportactions)
def build_plugin_menu(self, text, item_list, categories, func):
    """
    Builds a new XML description for a menu based on the list of plugindata

    :param text: section id suffix ('ToolsMenu' or 'ReportsMenu')
    :param item_list: registered PluginData items
    :param categories: mapping of category -> (sort-key, label) tuples
    :param func: factory producing the action callback for a PluginData
    :returns: ([xml-string], [(action_name, callback), ...])
    """
    menuitem = ('<item>\n'
                '<attribute name="action">win.%s</attribute>\n'
                '<attribute name="label">%s...</attribute>\n'
                '</item>\n')
    actions = []
    ofile = StringIO()
    ofile.write('<section id="%s">' % ('P_' + text))

    def write_submenu(label, pdatas):
        # Emit one <submenu> with an entry per plugin, collecting the
        # matching (action-name, callback) pairs as a side effect.
        ofile.write('<submenu>\n<attribute name="label"'
                    '>%s</attribute>\n' % label)
        for pdata in sorted(pdatas, key=lambda x: x.name):
            # BUGFIX: previously the unsupported branch only replaced
            # spaces with dashes, producing invalid GAction names for
            # plugin ids containing other invalid characters; sanitize
            # both branches the same way.
            new_key = valid_action_name(pdata.id)
            ofile.write(menuitem % (new_key, pdata.name))
            actions.append((new_key, func(pdata, self.dbstate,
                                          self.uistate)))
        ofile.write('</submenu>\n')

    hash_data = defaultdict(list)
    for pdata in item_list:
        if not pdata.supported:
            category = _UNSUPPORTED
        else:
            category = categories[pdata.category]
        hash_data[category].append(pdata)
    # Sort categories, skipping the unsupported
    for key in sorted(item for item in hash_data if item != _UNSUPPORTED):
        write_submenu(key[1], hash_data[key])
    # If there are any unsupported items we add the unsupported category
    # at the end of the menu
    if _UNSUPPORTED in hash_data:
        write_submenu(_UNSUPPORTED[1], hash_data[_UNSUPPORTED])
    ofile.write('</section>\n')
    return ([ofile.getvalue()], actions)
def display_about_box(self, *obj):
    """Display the About box."""
    about = GrampsAboutDialog(self.uistate.window)
    # Modal: blocks until dismissed, then the dialog is destroyed.
    about.run()
    about.destroy()
def get_available_views(self):
    """
    Query the views and determine what views to show and in which order

    :Returns: a list of lists containing tuples (view_id, viewclass)
    """
    pmgr = GuiPluginManager.get_instance()
    view_list = pmgr.get_reg_views()
    viewstoshow = defaultdict(list)
    for pdata in view_list:
        mod = pmgr.load_plugin(pdata)
        if not mod or not hasattr(mod, pdata.viewclass):
            #import of plugin failed
            try:
                lasterror = pmgr.get_fail_list()[-1][1][1]
            except Exception:
                # BUGFIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit.
                lasterror = '*** No error found, '
                lasterror += 'probably error in gpr.py file ***'
            ErrorDialog(
                _('Failed Loading View'),
                _('The view %(name)s did not load and reported an error.'
                  '\n\n%(error_msg)s\n\n'
                  'If you are unable to fix the fault yourself then you '
                  'can submit a bug at %(gramps_bugtracker_url)s '
                  'or contact the view author (%(firstauthoremail)s).\n\n'
                  'If you do not want Gramps to try and load this view '
                  'again, you can hide it by using the Plugin Manager '
                  'on the Help menu.'
                 ) % {'name': pdata.name,
                      'gramps_bugtracker_url': URL_BUGHOME,
                      'firstauthoremail': pdata.authors_email[0]
                      if pdata.authors_email else '...',
                      'error_msg': lasterror},
                parent=self.uistate.window)
            continue
        viewclass = getattr(mod, pdata.viewclass)
        # pdata.category is (string, trans-string):
        if pdata.order == START:
            viewstoshow[pdata.category[0]].insert(0, (pdata, viewclass))
        else:
            viewstoshow[pdata.category[0]].append((pdata, viewclass))
    # First, get those in order defined, if exists:
    resultorder = [viewstoshow[cat]
                   for cat in config.get("interface.view-categories")
                   if cat in viewstoshow]
    # Next, get the rest in some order:
    resultorder.extend(viewstoshow[cat]
                       for cat in sorted(viewstoshow.keys())
                       if viewstoshow[cat] not in resultorder)
    return resultorder
def key_bindings(*obj):
    """
    Display key bindings
    """
    # Opens the wiki page listing the keyboard shortcuts.
    display_help(webpage=WIKI_HELP_PAGE_KEY)
def manual_activate(*obj):
    """
    Display the Gramps manual
    """
    # Opens the user-manual wiki page in the browser.
    display_help(webpage=WIKI_HELP_PAGE_MAN)
def report_bug_activate(*obj):
    """
    Display the bug tracker web site
    """
    display_url(URL_BUGTRACKER)
def home_page_activate(*obj):
    """
    Display the Gramps home page
    """
    display_url(URL_HOMEPAGE)
def mailing_lists_activate(*obj):
    """
    Display the mailing list web page
    """
    display_url(URL_MAILINGLIST)
def extra_plugins_activate(*obj):
    """
    Display the wiki page with extra plugins
    """
    display_url(URL_WIKISTRING+WIKI_EXTRAPLUGINS)
def faq_activate(*obj):
    """
    Display FAQ
    """
    display_help(webpage=WIKI_HELP_PAGE_FAQ)
def run_plugin(pdata, dbstate, uistate):
    """
    run a plugin based on it's PluginData:

    1/ load plugin.
    2/ the report is run
    """
    pmgr = GuiPluginManager.get_instance()
    mod = pmgr.load_plugin(pdata)
    if not mod:
        #import of plugin failed
        # Surface the most recent load failure to the user, if any.
        failed = pmgr.get_fail_list()
        if failed:
            error_msg = failed[-1][1][1]
        else:
            error_msg = "(no error message)"
        ErrorDialog(
            _('Failed Loading Plugin'),
            _('The plugin %(name)s did not load and reported an error.\n\n'
              '%(error_msg)s\n\n'
              'If you are unable to fix the fault yourself then you can '
              'submit a bug at %(gramps_bugtracker_url)s or contact '
              'the plugin author (%(firstauthoremail)s).\n\n'
              'If you do not want Gramps to try and load this plugin again, '
              'you can hide it by using the Plugin Manager on the '
              'Help menu.') % {'name' : pdata.name,
                               'gramps_bugtracker_url' : URL_BUGHOME,
                               'firstauthoremail' : pdata.authors_email[0]
                               if pdata.authors_email
                               else '...',
                               'error_msg' : error_msg},
            parent=uistate.window)
        return

    # Reports and tools are launched through different frameworks; the
    # plugin type decides which entry point applies.
    if pdata.ptype == REPORT:
        report(dbstate, uistate, uistate.get_active('Person'),
               getattr(mod, pdata.reportclass),
               getattr(mod, pdata.optionclass),
               pdata.name, pdata.id,
               pdata.category, pdata.require_active)
    else:
        tool.gui_tool(dbstate=dbstate, user=User(uistate=uistate),
                      tool_class=getattr(mod, pdata.toolclass),
                      options_class=getattr(mod, pdata.optionclass),
                      translated_name=pdata.name,
                      name=pdata.id,
                      category=pdata.category,
                      callback=dbstate.db.request_rebuild)
    # Force a full garbage collection once the plugin run finishes.
    gc.collect(2)
def make_plugin_callback(pdata, dbstate, uistate):
    """
    Makes a callback for a report/tool menu item
    """
    def _activated(_action, _param):
        # The two GAction arguments are ignored; just launch the plugin.
        return run_plugin(pdata, dbstate, uistate)
    return _activated
def views_to_show(views, use_last=True):
    """
    Determine based on preference setting which views should be shown
    """
    current_cat = 0
    current_cat_view = 0
    default_cat_views = [0] * len(views)
    if use_last:
        last_view_id = config.get('preferences.last-view')
        last_view_ids = config.get('preferences.last-views')
        found = False
        for cat_index, cat_views in enumerate(views):
            for view_index, (pdata, _page_def) in enumerate(cat_views):
                if not found and pdata.id == last_view_id:
                    # This is the view that was open last time: make it
                    # both the current one and this category's default.
                    current_cat = cat_index
                    current_cat_view = view_index
                    default_cat_views[cat_index] = view_index
                    found = True
                    break
                if pdata.id in last_view_ids:
                    # Remember the last-used view of this category.
                    default_cat_views[cat_index] = view_index
        if not found:
            current_cat = 0
            current_cat_view = 0
    return current_cat, current_cat_view, default_cat_views
class QuickBackup(ManagedWindow): # TODO move this class into its own module
    def __init__(self, dbstate, uistate, user):
        """
        Make a quick XML back with or without media.
        """
        self.dbstate = dbstate
        self.user = user
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        window = Gtk.Dialog(title='',
                            transient_for=self.uistate.window,
                            destroy_with_parent=True)
        self.set_window(window, None, _("Gramps XML Backup"))
        self.setup_configs('interface.quick-backup', 500, 150)
        close_button = window.add_button(_('_Close'),
                                         Gtk.ResponseType.CLOSE)
        ok_button = window.add_button(_('_OK'),
                                      Gtk.ResponseType.APPLY)
        vbox = window.get_content_area()
        # Row 1: destination directory with a folder-picker button.
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("Path:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        path_entry = Gtk.Entry()
        dirtext = config.get('paths.quick-backup-directory')
        path_entry.set_text(dirtext)
        hbox.pack_start(path_entry, True, True, 0)
        file_entry = Gtk.Entry()
        button = Gtk.Button()
        button.connect("clicked",
                       lambda widget:
                       self.select_backup_path(widget, path_entry))
        image = Gtk.Image()
        image.set_from_icon_name('document-open', Gtk.IconSize.BUTTON)
        image.show()
        button.add(image)
        hbox.pack_end(button, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # Row 2: output file name, pre-filled from the configured template.
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("File:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        struct_time = time.localtime()
        file_entry.set_text(
            config.get('paths.quick-backup-filename'
                      ) % {"filename": self.dbstate.db.get_dbname(),
                           "year": struct_time.tm_year,
                           "month": struct_time.tm_mon,
                           "day": struct_time.tm_mday,
                           "hour": struct_time.tm_hour,
                           "minutes": struct_time.tm_min,
                           "seconds": struct_time.tm_sec,
                           "extension": "gpkg"})
        hbox.pack_end(file_entry, True, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # Row 3: include/exclude media radio buttons; the label shows an
        # estimate (in MB) of the total size of the referenced media files.
        hbox = Gtk.Box()
        fbytes = 0
        mbytes = "0"
        for media in self.dbstate.db.iter_media():
            fullname = media_path_full(self.dbstate.db, media.get_path())
            try:
                fbytes += os.path.getsize(fullname)
                length = len(str(fbytes))
                if fbytes <= 999999:
                    mbytes = "< 1"
                else:
                    mbytes = str(fbytes)[:(length-6)]
            except OSError:
                # Missing/unreadable media file: skip it in the estimate.
                pass
        label = Gtk.Label(label=_("Media:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        include = Gtk.RadioButton.new_with_mnemonic_from_widget(
            None, "%s (%s %s)" % (_("Include"),
                                  mbytes, _("MB", "Megabyte")))
        exclude = Gtk.RadioButton.new_with_mnemonic_from_widget(include,
                                                                _("Exclude"))
        include.connect("toggled", lambda widget: self.media_toggle(widget,
                                                                    file_entry))
        include_mode = config.get('preferences.quick-backup-include-mode')
        if include_mode:
            include.set_active(True)
        else:
            exclude.set_active(True)
        hbox.pack_start(include, False, True, 0)
        hbox.pack_end(exclude, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        self.show()
        dbackup = window.run()
        if dbackup == Gtk.ResponseType.APPLY:
            # if file exists, ask if overwrite; else abort
            basefile = file_entry.get_text()
            basefile = basefile.replace("/", r"-")
            filename = os.path.join(path_entry.get_text(), basefile)
            if os.path.exists(filename):
                question = QuestionDialog2(
                    _("Backup file already exists! Overwrite?"),
                    _("The file '%s' exists.") % filename,
                    _("Proceed and overwrite"),
                    _("Cancel the backup"),
                    parent=self.window)
                yes_no = question.run()
                if not yes_no:
                    # User declined: remember any changed directory, close.
                    current_dir = path_entry.get_text()
                    if current_dir != dirtext:
                        config.set('paths.quick-backup-directory', current_dir)
                    self.close()
                    return
            position = self.window.get_position() # crock
            window.hide()
            self.window.move(position[0], position[1])
            self.uistate.set_busy_cursor(True)
            self.uistate.pulse_progressbar(0)
            self.uistate.progress.show()
            self.uistate.push_message(self.dbstate, _("Making backup..."))
            # With media -> .gpkg package; without -> compressed .gramps XML.
            if include.get_active():
                from gramps.plugins.export.exportpkg import PackageWriter
                writer = PackageWriter(self.dbstate.db, filename, self.user)
                writer.export()
            else:
                from gramps.plugins.export.exportxml import XmlWriter
                writer = XmlWriter(self.dbstate.db, self.user,
                                   strip_photos=0, compress=1)
                writer.write(filename)
            self.uistate.set_busy_cursor(False)
            self.uistate.progress.hide()
            self.uistate.push_message(self.dbstate,
                                      _("Backup saved to '%s'") % filename)
            config.set('paths.quick-backup-directory', path_entry.get_text())
        else:
            self.uistate.push_message(self.dbstate, _("Backup aborted"))
        if dbackup != Gtk.ResponseType.DELETE_EVENT:
            self.close()

    def select_backup_path(self, widget, path_entry):
        """
        Choose a backup folder. Make sure there is one highlighted in
        right pane, otherwise FileChooserDialog will hang.
        """
        fdialog = Gtk.FileChooserDialog(
            title=_("Select backup directory"),
            transient_for=self.window,
            action=Gtk.FileChooserAction.SELECT_FOLDER)
        fdialog.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
                            _('_Apply'), Gtk.ResponseType.OK)
        mpath = path_entry.get_text()
        if not mpath:
            mpath = HOME_DIR
        fdialog.set_current_folder(os.path.dirname(mpath))
        fdialog.set_filename(os.path.join(mpath, "."))
        status = fdialog.run()
        if status == Gtk.ResponseType.OK:
            filename = fdialog.get_filename()
            if filename:
                path_entry.set_text(filename)
        fdialog.destroy()
        return True

    def media_toggle(self, widget, file_entry):
        """
        Toggles media include values in the quick backup dialog.
        """
        include = widget.get_active()
        config.set('preferences.quick-backup-include-mode', include)
        # Keep the file-name extension in sync with the chosen format.
        extension = "gpkg" if include else "gramps"
        filename = file_entry.get_text()
        if "." in filename:
            base, ext = filename.rsplit(".", 1)
            file_entry.set_text("%s.%s" % (base, extension))
        else:
            file_entry.set_text("%s.%s" % (filename, extension))
|
gramps-project/gramps
|
gramps/gui/viewmanager.py
|
Python
|
gpl-2.0
| 69,159
|
[
"Brian"
] |
1c1cd0000e57b15e3abc351160f657c090c1f3f2d766d9c08a516ac4d9aa0c2d
|
""" GOCDBClient module is a client for the GOC DB, looking for Downtimes.
"""
__RCSID__ = "$Id$"
import urllib2
import time
import socket
from datetime import datetime, timedelta
from xml.dom import minidom
from DIRAC import S_OK, S_ERROR, gLogger
def _parseSingleElement( element, attributes = None ):
"""
Given a DOM Element, return a dictionary of its child elements and values (as strings).
"""
handler = {}
for child in element.childNodes:
attrName = str( child.nodeName )
if attributes is not None:
if attrName not in attributes:
continue
try:
attrValue = str( child.childNodes[0].nodeValue )
except IndexError:
continue
handler[attrName] = attrValue
return handler
#############################################################################
class GOCDBClient( object ):
""" Class for dealing with GOCDB. Class because of easier use from RSS
"""
#############################################################################
def getStatus( self, granularity, name = None, startDate = None,
               startingInHours = None, timeout = None ):
  """
  Return actual GOCDB status of entity in `name`

  :params:
    :attr:`granularity`: string: should be a ValidRes, e.g. "Resource"

    :attr:`name`: should be the name(s) of the ValidRes.
    Could be a list of basestring or simply one basestring.
    If not given, fetches the complete list.

    :attr:`startDate`: if not given, takes only ongoing DownTimes.
    if given, could be a datetime or a string ("YYYY-MM-DD"), and download
    DownTimes starting after that date.

    :attr:`startingInHours`: optional integer. If given, donwload
    DownTimes starting in the next given hours (startDate is then useless)

    :attr:`timeout`: optional seconds; applied as the default socket timeout.

  :return: (example)
    {'OK': True,
     'Value': {'92569G0': {'DESCRIPTION': '...', 'SEVERITY': 'OUTAGE', ...},
               '93293G0': {'DESCRIPTION': '...', 'SEVERITY': 'WARNING', ...}
              }
    }
  """
  startDate_STR = None
  startDateMax = None

  if startingInHours is not None:
    # Window [now, now + startingInHours]; startDate is overridden.
    startDate = datetime.utcnow()
    startDateMax = startDate + timedelta( hours = startingInHours )

  if startDate is not None:
    if isinstance( startDate, basestring ):
      startDate_STR = startDate
      startDate = datetime( *time.strptime( startDate, "%Y-%m-%d" )[0:3] )
    elif isinstance( startDate, datetime ):
      startDate_STR = startDate.isoformat( ' ' )[0:10]

  if timeout is not None:
    # BUGFIX: the caller-supplied timeout was previously ignored and a
    # hard-coded 10 seconds was set instead.
    socket.setdefaulttimeout( timeout )

  if startingInHours is not None:
    # make 2 queries and later merge the results
    # first call: pass the startDate argument as None,
    # so the curlDownload method will search for only ongoing DTs
    resXML_ongoing = self._downTimeCurlDownload( name )
    if resXML_ongoing is None:
      res_ongoing = {}
    else:
      res_ongoing = self._downTimeXMLParsing( resXML_ongoing, granularity, name )

    # second call: pass the startDate argument
    resXML_startDate = self._downTimeCurlDownload( name, startDate_STR )
    if resXML_startDate is None:
      res_startDate = {}
    else:
      res_startDate = self._downTimeXMLParsing( resXML_startDate, granularity,
                                                name, startDateMax )

    # merge the results of the 2 queries:
    res = res_ongoing
    for k in res_startDate.keys():
      if k not in res:
        res[k] = res_startDate[k]

  else:
    #just query for onGoing downtimes
    resXML = self._downTimeCurlDownload( name, startDate_STR )
    if resXML is None:
      return S_OK( None )

    res = self._downTimeXMLParsing( resXML, granularity, name, startDateMax )

  if res == {}:
    res = None

  return S_OK( res )
#############################################################################
def getServiceEndpointInfo( self, granularity, entity ):
  """
  Get service endpoint info (in a dictionary)

  :params:
    :attr:`granularity` : a string. Could be in ('hostname', 'sitename', 'roc',
       'country', 'service_type', 'monitored')

    :attr:`entity` : a string. Actual name of the entity.
  """
  # isinstance is preferred over ``type(x) == str`` comparisons.
  assert isinstance( granularity, str ) and isinstance( entity, str )
  try:
    serviceXML = self._getServiceEndpointCurlDownload( granularity, entity )
    return S_OK( self._serviceEndpointXMLParsing( serviceXML ) )
  except Exception as e:
    # BUGFIX: ``except Exception, e`` is Python-2-only syntax; the
    # ``as`` form works on Python 2.6+ and Python 3.
    _msg = 'Exception getting information for %s %s: %s' % ( granularity, entity, e )
    gLogger.exception( _msg )
    return S_ERROR( _msg )
#############################################################################
# def getSiteInfo(self, site):
# """
# Get site info (in a dictionary)
#
# :params:
# :attr:`entity` : a string. Actual name of the site.
# """
#
# siteXML = self._getSiteCurlDownload(site)
# return S_OK(self._siteXMLParsing(siteXML))
#############################################################################
# def buildURL(self, DTList):
# '''build the URL relative to the DT '''
# baseURL = "https://goc.egi.eu/downtime/list?id="
# for dt in DTList:
# id = str(dt['id'])
# url = baseURL + id
# dt['URL'] = url
#############################################################################
def _downTimeCurlDownload( self, entity = None, startDate = None ):
  """ Download ongoing downtimes for entity using the GOC DB programmatic interface
  """
  # Base endpoint of the GOCDB-PI get_downtime method.
  gocdb_ep = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_downtime"
  # Restrict the query to a single top entity when one name is given.
  if entity is not None and isinstance( entity, basestring ):
    gocdb_ep = gocdb_ep + "&topentity=" + entity
  # Without a start date only ongoing downtimes are requested.
  if startDate is None:
    gocdb_ep = gocdb_ep + "&ongoing_only=yes"
  else:
    gocdb_ep = gocdb_ep + "&startdate=" + startDate
  return urllib2.urlopen( urllib2.Request( gocdb_ep ) ).read()
#############################################################################
def _getServiceEndpointCurlDownload( self, granularity, entity ):
  """
  Calls method `get_service_endpoint` from the GOC DB programmatic interface.

  :params:
    :attr:`granularity` : a string. Could be in ('hostname', 'sitename', 'roc',
       'country', 'service_type', 'monitored')

    :attr:`entity` : a string. Actual name of the entity.
  """
  if not isinstance( granularity, str ) or not isinstance( entity, str ):
    # BUGFIX: ``raise ValueError, "..."`` is Python-2-only statement
    # syntax; the call form is valid on both Python 2 and 3.
    raise ValueError( "Arguments must be strings." )

  # GOCDB-PI query
  gocdb_ep = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_service_endpoint&" \
             + granularity + '=' + entity

  service_endpoint_page = urllib2.urlopen( gocdb_ep )

  return service_endpoint_page.read()
#############################################################################
# def _getSiteCurlDownload(self, site):
# """
# Calls method `get_site` from the GOC DB programmatic interface.
#
# :params:
# :attr:`site` : a string. Actual name of the site.
# """
#
# # GOCDB-PI query
# gocdb_ep = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_site&sitename="+site
#
# req = urllib2.Request(gocdb_ep)
# site_page = urllib2.urlopen(req)
#
# return site_page.read()
#############################################################################
def _downTimeXMLParsing( self, dt, siteOrRes, entities = None, startDateMax = None ):
  """ Performs xml parsing from the dt string (returns a dictionary)

  :params:
    :attr:`dt` : XML text returned by the GOCDB get_downtime method
    :attr:`siteOrRes` : 'Site'/'Sites' or 'Resource'/'Resources' filter
    :attr:`entities` : optional name or list of names to keep
    :attr:`startDateMax` : optional datetime; drop DTs starting after it
  """
  doc = minidom.parseString( dt )

  downtimeElements = doc.getElementsByTagName( "DOWNTIME" )
  dtDict = {}

  for dtElement in downtimeElements:
    elements = _parseSingleElement( dtElement, ['SEVERITY', 'SITENAME', 'HOSTNAME', 'ENDPOINT',
                                                'HOSTED_BY', 'FORMATED_START_DATE',
                                                'FORMATED_END_DATE', 'DESCRIPTION',
                                                'GOCDB_PORTAL_URL', 'SERVICE_TYPE' ] )
    # Key: "<PRIMARY_KEY> <most specific locator available>", preferring
    # ENDPOINT, then HOSTNAME, then SITENAME (KeyError if none present,
    # as before).
    pkey = str( dtElement.getAttributeNode( "PRIMARY_KEY" ).nodeValue )
    if 'ENDPOINT' in elements:
      suffix = elements['ENDPOINT']
    elif 'HOSTNAME' in elements:
      suffix = elements['HOSTNAME']
    else:
      suffix = elements['SITENAME']
    dtDict[ pkey + ' ' + suffix ] = elements

  # Normalize once instead of inside the loop.
  if entities is not None and not isinstance( entities, list ):
    entities = [entities]

  # BUGFIX: iterate over a snapshot of the keys -- the dict is modified
  # inside the loop, which breaks under Python 3 when iterating .keys().
  for dt_ID in list( dtDict ):
    if siteOrRes in ( 'Site', 'Sites' ):
      if 'SITENAME' not in dtDict[dt_ID]:
        dtDict.pop( dt_ID )
        continue
      if entities is not None and dtDict[dt_ID]['SITENAME'] not in entities:
        dtDict.pop( dt_ID )
    elif siteOrRes in ( 'Resource', 'Resources' ):
      if 'HOSTNAME' not in dtDict[dt_ID]:
        dtDict.pop( dt_ID )
        continue
      if entities is not None and dtDict[dt_ID]['HOSTNAME'] not in entities:
        dtDict.pop( dt_ID )

  if startDateMax is not None:
    for dt_ID in list( dtDict ):
      startDateMaxFromKeys = datetime( *time.strptime( dtDict[dt_ID]['FORMATED_START_DATE'],
                                                       "%Y-%m-%d %H:%M" )[0:5] )
      if startDateMaxFromKeys > startDateMax:
        dtDict.pop( dt_ID )

  return dtDict
#############################################################################
def _serviceEndpointXMLParsing( self, serviceXML ):
  """ Performs xml parsing from the service endpoint string
      Returns a list.
  """
  dom = minidom.parseString( serviceXML )
  # One dictionary per SERVICE_ENDPOINT element, all children included.
  return [ _parseSingleElement( node )
           for node in dom.getElementsByTagName( "SERVICE_ENDPOINT" ) ]
|
calancha/DIRAC
|
Core/LCG/GOCDBClient.py
|
Python
|
gpl-3.0
| 11,486
|
[
"DIRAC"
] |
8661f52df2ee0c3c3b36b2d58ae2bf60343ff38e1edcbae55a01c3548a12a496
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Opencv(CMakePackage, CudaPackage):
    """OpenCV is released under a BSD license and hence it's free for both
    academic and commercial use. It has C++, C, Python and Java interfaces and
    supports Windows, Linux, Mac OS, iOS and Android. OpenCV was designed for
    computational efficiency and with a strong focus on real-time applications.
    Written in optimized C/C++, the library can take advantage of multi-core
    processing. Enabled with OpenCL, it can take advantage of the hardware
    acceleration of the underlying heterogeneous compute platform. Adopted all
    around the world, OpenCV has more than 47 thousand people of user community
    and estimated number of downloads exceeding 9 million. Usage ranges from
    interactive art, to mines inspection, stitching maps on the web or through
    advanced robotics.
    """

    homepage = 'http://opencv.org/'
    url = 'https://github.com/Itseez/opencv/archive/3.1.0.tar.gz'
    git = 'https://github.com/opencv/opencv.git'

    version('master', branch='master')
    version('4.2.0', sha256='9ccb2192d7e8c03c58fee07051364d94ed7599363f3b0dce1c5e6cc11c1bb0ec')
    version('4.1.2', sha256='385dd0a9c25e67ef0dd60e022d2a2d7b17e2f36819cf3cb46aa8cdff5c5282c9')
    version('4.1.1', sha256='5de5d96bdfb9dad6e6061d70f47a0a91cee96bb35afb9afb9ecb3d43e243d217')
    version('4.1.0-openvino', sha256='58764d2487c6fb4cd950fb46483696ae7ae28e257223d6e44e162caa22ee9e5c')
    version('4.1.0', sha256='8f6e4ab393d81d72caae6e78bd0fd6956117ec9f006fba55fcdb88caf62989b7')
    version('4.0.1-openvino', sha256='8cbe32d12a70decad7a8327eb4fba46016a9c47ff3ba6e114d27b450f020716f')
    version('4.0.1', sha256='7b86a0ee804244e0c407321f895b15e4a7162e9c5c0d2efc85f1cadec4011af4')
    version('4.0.0-openvino', sha256='aa910078ed0b7e17bd10067e04995c131584a6ed6d0dcc9ca44a292aa8e296fc')
    version('4.0.0', sha256='3787b3cc7b21bba1441819cb00c636911a846c0392ddf6211d398040a1e4886c')
    version('3.4.6', sha256='e7d311ff97f376b8ee85112e2b536dbf4bdf1233673500175ed7cf21a0089f6d')
    version('3.4.5', sha256='0c57d9dd6d30cbffe68a09b03f4bebe773ee44dc8ff5cd6eaeb7f4d5ef3b428e')
    version('3.4.4', sha256='a35b00a71d77b484f73ec485c65fe56c7a6fa48acd5ce55c197aef2e13c78746')
    version('3.4.3', sha256='4eef85759d5450b183459ff216b4c0fa43e87a4f6aa92c8af649f89336f002ec')
    version('3.4.1', sha256='f1b87684d75496a1054405ae3ee0b6573acaf3dad39eaf4f1d66fdd7e03dc852')
    version('3.4.0', sha256='678cc3d2d1b3464b512b084a8cca1fad7de207c7abdf2caa1fed636c13e916da')
    version('3.3.1', sha256='5dca3bb0d661af311e25a72b04a7e4c22c47c1aa86eb73e70063cd378a2aa6ee')
    version('3.3.0', sha256='8bb312b9d9fd17336dc1f8b3ac82f021ca50e2034afc866098866176d985adc6')
    version('3.2.0', sha256='9541efbf68f298f45914b4e837490647f4d5e472b4c0c04414a787d116a702b2')
    version('3.1.0', sha256='f3b160b9213dd17aa15ddd45f6fb06017fe205359dbd1f7219aad59c98899f15')
    version('2.4.13.2', sha256='4b00c110e6c54943cbbb7cf0d35c5bc148133ab2095ee4aaa0ac0a4f67c58080')
    version('2.4.13.1', sha256='0d5ce5e0973e3a745f927d1ee097aaf909aae59f787be6d27a03d639e2d96bd7')
    version('2.4.13', sha256='94ebcca61c30034d5fb16feab8ec12c8a868f5162d20a9f0396f0f5f6d8bbbff')
    version('2.4.12.3', sha256='a4cbcd2d470860b0cf1f8faf504619c18a8ac38fd414c5a88ed3e94c963aa750')
    version('2.4.12.2', sha256='150a165eb14a5ea74fb94dcc16ac7d668a6ff20a4449df2570734a2abaab9c0e')
    version('2.4.12.1', sha256='c1564771f79304a2597ae4f74f44032021e3a46657e4a117060c08f5ed05ad83')

    # Standard variants
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('lapack', default=True, description='Include Lapack library support')
    variant('powerpc', default=False, description='Enable PowerPC for GCC')
    variant('vsx', default=False, description='Enable POWER8 and above VSX (64-bit little-endian)')
    variant('fast-math', default=False,
            description='Enable -ffast-math (not recommended for GCC 4.6.x)')

    # OpenCV modules (each maps to a BUILD_opencv_* CMake flag in cmake_args)
    variant('calib3d', default=True, description='calib3d module')
    variant('core', default=True, description='Include opencv_core module into the OpenCV build')
    variant('cudacodec', default=False, description='Enable video encoding/decoding with CUDA')
    variant('dnn', default=True, description='Build DNN support')
    variant('features2d', default=True, description='features2d module')
    variant('flann', default=True, description='flann module')
    variant('highgui', default=True, description='Include opencv_highgui module into the OpenCV build')
    variant('imgproc', default=True, description='Include opencv_imgproc module into the OpenCV build')
    variant('java', default=True,
            description='Activates support for Java')
    variant('ml', default=True, description='Build ML support')
    variant('python', default=True,
            description='Enables the build of Python extensions')
    variant('stitching', default=True, description='stitching module')
    variant('superres', default=True, description='superres module')
    variant('ts', default=True, description='Include opencv_ts module into the OpenCV build')
    variant('video', default=True, description='video module')
    variant('videostab', default=True, description='videostab module')
    variant('videoio', default=True, description='videoio module')

    # Optional 3rd party components (each maps to a WITH_* CMake flag)
    variant('eigen', default=True, description='Activates support for eigen')
    variant('ipp', default=True, description='Activates support for IPP')
    variant('ipp_iw', default=True, description='Build IPP IW from source')
    variant('jasper', default=True, description='Activates support for JasPer')
    variant('jpeg', default=True, description='Include JPEG support')
    variant('opencl', default=True, description='Include OpenCL Runtime support')
    variant('opencl_svm', default=True, description='Include OpenCL Shared Virtual Memory support')
    variant('openclamdfft', default=True, description='Include OpenCL AMD OpenCL FFT library support')
    variant('openclamdblas', default=True, description='Include OpenCL AMD OpenCL BLAS library support')
    variant('openmp', default=True, description='Activates support for OpenMP threads')
    variant('pthreads_pf', default=True, description='Use pthreads-based parallel_for')
    variant('png', default=True, description='Include PNG support')
    variant('qt', default=False, description='Activates support for QT')
    variant('gtk', default=True, description='Activates support for GTK')
    variant('tiff', default=True, description='Include TIFF support')
    variant('vtk', default=True, description='Activates support for VTK')
    variant('zlib', default=True, description='Build zlib from source')

    variant('contrib', default=False, description='Adds in code from opencv_contrib.')
    contrib_vers = ['4.1.0', '4.1.1', '4.2.0']
    # The contrib modules live in a separate repository; fetch them both for
    # explicit +contrib builds and for +cuda builds (the conflicts() directive
    # below states that cuda support requires the contrib modules).
    for cv in contrib_vers:
        resource(name="contrib",
                 git='https://github.com/opencv/opencv_contrib.git',
                 tag="{0}".format(cv),
                 when='@{0}+contrib'.format(cv))
        resource(name="contrib",
                 git='https://github.com/opencv/opencv_contrib.git',
                 tag="{0}".format(cv),
                 when='@{0}+cuda'.format(cv))

    depends_on('hdf5', when='+contrib')
    depends_on('hdf5', when='+cuda')
    depends_on('blas', when='+lapack')

    # Patch to fix conflict between CUDA and OpenCV (reproduced with 3.3.0
    # and 3.4.1) header file that have the same name.Problem is fixed in
    # the current development branch of OpenCV. See #8461 for more information.
    patch('dnn_cuda.patch', when='@3.3.0:3.4.1+cuda+dnn')

    patch('opencv3.2_cmake.patch', when='@3.2')
    patch('opencv3.2_vtk.patch', when='@3.2+vtk')
    patch('opencv3.2_regacyvtk.patch', when='@3.2+vtk')
    patch('opencv3.2_ffmpeg.patch', when='@3.2+videoio')
    patch('opencv3.2_python3.7.patch', when='@3.2+python')
    patch('opencv3.2_fj.patch', when='@3.2 %fj')

    depends_on('eigen', when='+eigen')
    depends_on('zlib', when='+zlib')
    depends_on('libpng', when='+png')
    depends_on('jpeg', when='+jpeg')
    depends_on('libtiff', when='+tiff')
    depends_on('jasper', when='+jasper')
    depends_on('cuda', when='+cuda')
    depends_on('gtkplus', when='+gtk')
    depends_on('vtk', when='+vtk')
    depends_on('qt', when='+qt')
    depends_on('java', when='+java')
    depends_on('ant', when='+java', type='build')
    depends_on('py-numpy', when='+python', type=('build', 'run'))
    # DNN support needs protobuf; the required version depends on OpenCV version
    depends_on('protobuf@3.5.0:', when='@3.4.1: +dnn')
    depends_on('protobuf@3.1.0', when='@3.3.0:3.4.0 +dnn')
    depends_on('ffmpeg', when='+videoio')
    # NOTE(review): an MPI dependency for +videoio is surprising — presumably
    # pulled in for a parallel ffmpeg build; confirm whether it is needed.
    depends_on('mpi', when='+videoio')

    # TODO For Cuda >= 10, make sure 'dynlink_nvcuvid.h' or 'nvcuvid.h'
    # exists, otherwise build will fail
    # See https://github.com/opencv/opencv_contrib/issues/1786
    conflicts('cuda@10:', when='+cudacodec')
    conflicts('cuda', when='~contrib', msg='cuda support requires +contrib')

    # IPP is provided x86_64 only
    conflicts('+ipp', when="arch=aarch64:")

    extends('python', when='+python')
def cmake_args(self):
spec = self.spec
# Standard variants
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format((
'ON' if '+shared' in spec else 'OFF')),
'-DENABLE_PRECOMPILED_HEADERS:BOOL=OFF',
'-DWITH_LAPACK={0}'.format((
'ON' if '+lapack' in spec else 'OFF')),
'-DENABLE_POWERPC={0}'.format((
'ON' if '+powerpc' in spec else 'OFF')),
'-DENABLE_VSX={0}'.format((
'ON' if '+vsx' in spec else 'OFF')),
'-DENABLE_FAST_MATH={0}'.format((
'ON' if '+fast-math' in spec else 'OFF')),
]
# modules
args.extend([
'-DBUILD_opencv_calib3d={0}'.format((
'ON' if '+calib3d' in spec else 'OFF')),
'-DBUILD_opencv_core:BOOL={0}'.format((
'ON' if '+core' in spec else 'OFF')),
'-DBUILD_opencv_cudacodec={0}'.format((
'ON' if '+cudacodec' in spec else 'OFF')),
'-DBUILD_opencv_dnn:BOOL={0}'.format((
'ON' if '+dnn' in spec else 'OFF')),
'-DBUILD_opencv_features2d={0}'.format((
'ON' if '+features2d' in spec else 'OFF')),
'-DBUILD_opencv_flann={0}'.format((
'ON' if '+flann' in spec else 'OFF')),
'-DBUILD_opencv_highgui:BOOL={0}'.format((
'ON' if '+highgui' in spec else 'OFF')),
'-DBUILD_opencv_imgproc:BOOL={0}'.format((
'ON' if '+imgproc' in spec else 'OFF')),
'-DBUILD_opencv_java:BOOL={0}'.format((
'ON' if '+java' in spec else 'OFF')),
'-DBUILD_opencv_ml={0}'.format((
'ON' if '+ml' in spec else 'OFF')),
'-DBUILD_opencv_stitching={0}'.format((
'ON' if '+stitching' in spec else 'OFF')),
'-DBUILD_opencv_superres={0}'.format((
'ON' if '+superres' in spec else 'OFF')),
'-DBUILD_opencv_ts={0}'.format((
'ON' if '+ts' in spec else 'OFF')),
'-DBUILD_opencv_video={0}'.format((
'ON' if '+video' in spec else 'OFF')),
'-DBUILD_opencv_videostab={0}'.format((
'ON' if '+videostab' in spec else 'OFF')),
'-DBUILD_opencv_videoio={0}'.format((
'ON' if '+videoio' in spec else 'OFF')),
])
# 3rd party components
args.extend([
'-DBUILD_IPP_IW:BOOL={0}'.format((
'ON' if '+ipp_iw' in spec else 'OFF')),
'-DWITH_CUDA:BOOL={0}'.format((
'ON' if '+cuda' in spec else 'OFF')),
'-DWITH_EIGEN:BOOL={0}'.format((
'ON' if '+eigen' in spec else 'OFF')),
'-DWITH_IPP:BOOL={0}'.format((
'ON' if '+ipp' in spec else 'OFF')),
'-DWITH_JASPER:BOOL={0}'.format((
'ON' if '+jasper' in spec else 'OFF')),
'-DWITH_JPEG:BOOL={0}'.format((
'ON' if '+jpeg' in spec else 'OFF')),
'-DWITH_OPENCL:BOOL={0}'.format((
'ON' if '+opencl' in spec else 'OFF')),
'-DWITH_OPENCL_SVM:BOOL={0}'.format((
'ON' if '+opencl_svm' in spec else 'OFF')),
'-DWITH_OPENCLAMDFFT:BOOL={0}'.format((
'ON' if '+openclamdfft' in spec else 'OFF')),
'-DWITH_OPENCLAMDBLAS:BOOL={0}'.format((
'ON' if '+openclamdblas' in spec else 'OFF')),
'-DWITH_OPENMP:BOOL={0}'.format((
'ON' if '+openmp' in spec else 'OFF')),
'-DWITH_PTHREADS_PF:BOOL={0}'.format((
'ON' if '+pthreads_pf' in spec else 'OFF')),
'-DWITH_PNG:BOOL={0}'.format((
'ON' if '+png' in spec else 'OFF')),
'-DWITH_QT:BOOL={0}'.format((
'ON' if '+qt' in spec else 'OFF')),
'-DWITH_TIFF:BOOL={0}'.format((
'ON' if '+tiff' in spec else 'OFF')),
'-DWITH_VTK:BOOL={0}'.format((
'ON' if '+vtk' in spec else 'OFF')),
'-DWITH_PROTOBUF:BOOL={0}'.format((
'ON' if '@3.3.0: +dnn' in spec else 'OFF')),
'-DBUILD_PROTOBUF:BOOL=OFF',
'-DPROTOBUF_UPDATE_FILES={0}'.format('ON')
])
if '+contrib' in spec or '+cuda' in spec:
args.append('-DOPENCV_EXTRA_MODULES_PATH={0}'.format(
join_path(self.stage.source_path, 'opencv_contrib/modules')))
if '+cuda' in spec:
if spec.variants['cuda_arch'].value[0] != 'none':
cuda_arch = [x for x in spec.variants['cuda_arch'].value if x]
args.append('-DCUDA_ARCH_BIN={0}'.format(
' '.join(cuda_arch)))
# Media I/O
if '+zlib' in spec:
zlib = spec['zlib']
args.extend([
'-DZLIB_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
zlib.libs[0]),
'-DZLIB_INCLUDE_DIR:PATH={0}'.format(
zlib.headers.directories[0])
])
if '+png' in spec:
libpng = spec['libpng']
args.extend([
'-DPNG_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
libpng.libs[0]),
'-DPNG_INCLUDE_DIR:PATH={0}'.format(
libpng.headers.directories[0])
])
if '+jpeg' in spec:
libjpeg = spec['jpeg']
args.extend([
'-DBUILD_JPEG:BOOL=OFF',
'-DJPEG_LIBRARY:FILEPATH={0}'.format(libjpeg.libs[0]),
'-DJPEG_INCLUDE_DIR:PATH={0}'.format(
libjpeg.headers.directories[0])
])
if '+tiff' in spec:
libtiff = spec['libtiff']
args.extend([
'-DTIFF_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
libtiff.libs[0]),
'-DTIFF_INCLUDE_DIR:PATH={0}'.format(
libtiff.headers.directories[0])
])
if '+jasper' in spec:
jasper = spec['jasper']
args.extend([
'-DJASPER_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
jasper.libs[0]),
'-DJASPER_INCLUDE_DIR:PATH={0}'.format(
jasper.headers.directories[0])
])
# GUI
if '+gtk' not in spec:
args.extend([
'-DWITH_GTK:BOOL=OFF',
'-DWITH_GTK_2_X:BOOL=OFF'
])
elif '^gtkplus@3:' in spec:
args.extend([
'-DWITH_GTK:BOOL=ON',
'-DWITH_GTK_2_X:BOOL=OFF'
])
elif '^gtkplus@2:3' in spec:
args.extend([
'-DWITH_GTK:BOOL=OFF',
'-DWITH_GTK_2_X:BOOL=ON'
])
# Python
if '+python' in spec:
python_exe = spec['python'].command.path
python_lib = spec['python'].libs[0]
python_include_dir = spec['python'].headers.directories[0]
if '^python@3:' in spec:
args.extend([
'-DBUILD_opencv_python3=ON',
'-DPYTHON3_EXECUTABLE={0}'.format(python_exe),
'-DPYTHON3_LIBRARY={0}'.format(python_lib),
'-DPYTHON3_INCLUDE_DIR={0}'.format(python_include_dir),
'-DBUILD_opencv_python2=OFF',
])
elif '^python@2:3' in spec:
args.extend([
'-DBUILD_opencv_python2=ON',
'-DPYTHON2_EXECUTABLE={0}'.format(python_exe),
'-DPYTHON2_LIBRARY={0}'.format(python_lib),
'-DPYTHON2_INCLUDE_DIR={0}'.format(python_include_dir),
'-DBUILD_opencv_python3=OFF',
])
else:
args.extend([
'-DBUILD_opencv_python2=OFF',
'-DBUILD_opencv_python3=OFF'
])
return args
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries(
"libopencv_*", root=self.prefix, shared=shared, recursive=True
)
|
rspavel/spack
|
var/spack/repos/builtin/packages/opencv/package.py
|
Python
|
lgpl-2.1
| 18,013
|
[
"VTK"
] |
56c9a7f5efdcda46ff70eeb1be8de69766e5ee0cdcc5d48b0ad40869e15a43a5
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-ban-site
# Author : Stuart Paterson
########################################################################
"""
Remove Site from Active mask for current Setup
Example:
$ dirac-admin-ban-site LCG.IN2P3.fr "Pilot installation problems"
"""
import time
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Ban a site from the active site mask and optionally notify by email."""
    # -E/--email toggles the notification mail sent after the ban.
    Script.registerSwitch("E:", "email=", "Boolean True/False (True by default)")
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument("Site: Name of the Site")
    Script.registerArgument("Comment: Reason of the action")
    Script.parseCommandLine(ignoreErrors=True)

    # DIRAC modules are imported only after parseCommandLine() has run,
    # so the configuration is initialised first — keep this ordering.
    from DIRAC import exit as DIRACExit, gConfig, gLogger
    from DIRAC.Core.Utilities.PromptUser import promptUser
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
    from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

    def getBoolean(value):
        # Map the switch value to a bool; any other value shows the help
        # (Script.showHelp() presumably exits — TODO confirm; otherwise this
        # falls through and returns None).
        if value.lower() == "true":
            return True
        elif value.lower() == "false":
            return False
        else:
            Script.showHelp()

    # Email notification defaults to enabled.
    email = True
    for switch in Script.getUnprocessedSwitches():
        if switch[0] == "email":
            email = getBoolean(switch[1])

    diracAdmin = DiracAdmin()
    exitCode = 0
    errorList = []
    setup = gConfig.getValue("/DIRAC/Setup", "")
    if not setup:
        print("ERROR: Could not contact Configuration Service")
        exitCode = 2
        DIRACExit(exitCode)

    # result = promptUser(
    #     'All the elements that are associated with this site will be banned,'
    #     'are you sure about this action?'
    # )
    # if not result['OK'] or result['Value'] is 'n':
    #   print 'Script stopped'
    #   DIRACExit( 0 )

    # parseCommandLine show help when mandatory arguments are not specified or incorrect argument
    site, comment = Script.getPositionalArgs(group=True)
    result = diracAdmin.banSite(site, comment, printOutput=True)
    if not result["OK"]:
        errorList.append((site, result["Message"]))
        exitCode = 2
    else:
        if email:
            userName = diracAdmin._getCurrentUser()
            if not userName["OK"]:
                print("ERROR: Could not obtain current username from proxy")
                exitCode = 2
                DIRACExit(exitCode)
            userName = userName["Value"]
            subject = "%s is banned for %s setup" % (site, setup)
            body = "Site %s is removed from site mask for %s setup by %s on %s.\n\n" % (
                site,
                setup,
                userName,
                time.asctime(),
            )
            body += "Comment:\n%s" % comment
            # The notification address comes from the Operations configuration.
            addressPath = "EMail/Production"
            address = Operations().getValue(addressPath, "")
            if not address:
                gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
            else:
                result = diracAdmin.sendMail(address, subject, body)
        else:
            print("Automatic email disabled by flag.")

    for error in errorList:
        print("ERROR %s: %s" % error)

    DIRACExit(exitCode)


if __name__ == "__main__":
    main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_ban_site.py
|
Python
|
gpl-3.0
| 3,372
|
[
"DIRAC"
] |
f39601e30358a9adb415e46163eace278fc7adff1da78cad255535c731f9247b
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# TODO(mdan): Use six for compatibility here.
import atexit
import imp
import importlib.util
import os
import sys
import tempfile

import astor
import gast

from tensorflow.python.autograph.pyct import origin_info
def ast_to_source(node, indentation='  '):
  """Return the source code of given AST.

  Args:
    node: The code to compile, as an AST object (or a list/tuple of them).
    indentation: The string to use for indentation.

  Returns:
    code: The source code generated from the AST object(s).
  """
  # Normalize to a tuple so a single node and a list are handled uniformly.
  if not isinstance(node, (list, tuple)):
    node = (node,)
  generator = astor.code_gen.SourceGenerator(indentation, False,
                                             astor.string_repr.pretty_string)
  for n in node:
    if isinstance(n, gast.AST):
      # astor only understands standard `ast` nodes; convert gast nodes first.
      n = gast.gast_to_ast(n)
    generator.visit(n)
    generator.result.append('\n')
  # In some versions of Python, literals may appear as actual values. This
  # ensures everything is string.
  code = ''.join(map(str, generator.result))
  # Strip leading blank lines.
  code_lines = code.split('\n')
  trimmed_code_lines = []
  for l in code_lines:
    if l.rstrip() or trimmed_code_lines:
      trimmed_code_lines.append(l)
  code = '\n'.join(trimmed_code_lines)
  # Work around the reference cycle generated by astor.
  # See https://github.com/berkerpeksag/astor/blob/55dd323f7d8d696610c703c0296763c567685c31/astor/code_gen.py#L162  # pylint:disable=line-too-long
  # Reference cycles are quite disliked by TensorFlow's tests.
  if hasattr(generator, 'write'):
    generator.write = None
  del generator
  return code
def _source_to_module(source, delete_on_exit):
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
module_name = os.path.basename(f.name[:-3])
f.write(source)
# TODO(mdan): Try flush() and delete=False instead.
if delete_on_exit:
atexit.register(lambda: os.remove(f.name))
return imp.load_source(module_name, f.name), f.name
def ast_to_object(nodes,
                  indentation='  ',
                  include_source_map=False,
                  source_prefix=None,
                  delete_on_exit=True):
  """Return the Python objects represented by given AST.

  Compiling the AST code this way ensures that the source code is readable by
  e.g. `pdb` or `inspect`.

  Args:
    nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST
      object.
    indentation: Text, the string to use for indentation.
    include_source_map: bool, whether to attach a source map to the compiled
      object. Also see origin_info.py.
    source_prefix: Optional[Text], string to print as-is into the source file.
    delete_on_exit: bool, whether to delete the temporary file used for
      compilation on exit.

  Returns:
    (module, source): A compiled module, and the source code of the module.

  Raises:
    ValueError: If ag_source_map__ is already in the namespace of the compiled
    nodes.
  """
  if not isinstance(nodes, (list, tuple)):
    nodes = (nodes,)

  source = ast_to_source(nodes, indentation=indentation)
  if source_prefix:
    source = source_prefix + '\n' + source
  module, filename = _source_to_module(source, delete_on_exit)

  if include_source_map:
    # `nodes` was normalized to a tuple above, so each node maps to its
    # (negative) index from the end of the generated source.
    indices = range(-len(nodes), 0)
    source_map = origin_info.create_source_map(nodes, source, filename, indices)
    # TODO(znado): Clean this up so we don't need to attach it to the namespace.
    # We cannot get the rewritten function name until it is too late so
    # templating is hard, and this cleanly fixes the issues encountered with
    # nested functions because this is attached to the outermost one.
    # TODO(mdan): This name should be decided by the caller.
    source_map_name = 'ag_source_map__'
    # Raise (rather than assert) so the documented ValueError contract holds
    # even when Python runs with assertions disabled (-O).
    if source_map_name in module.__dict__:
      raise ValueError(
          'cannot convert %s because it has namespace attribute "%s", which '
          'is reserved for AutoGraph.' % (module, source_map_name))
    module.__dict__[source_map_name] = source_map

  return module, source
|
jbedorf/tensorflow
|
tensorflow/python/autograph/pyct/compiler.py
|
Python
|
apache-2.0
| 4,980
|
[
"VisIt"
] |
7a2c7ad38f177d1a04a2ee32b60a01bb0642de0745cfed1532b94350b49159c9
|
#!/usr/bin/env python
from SPAdesPipeline.OLCspades.accessoryFunctions import *
__author__ = 'adamkoziol'
class DegeneratePrimers(object):
def objectifier(self):
import primerobject
# Initialise the primer object
self.runmetadata = primerobject.PrimerObject(self)
if self.batch:
printtime('Performing batch analyses', self.starttime)
self.runmetadata.batch()
else:
printtime('Performing single analysis', self.starttime)
self.runmetadata.single()
def degenerate(self):
"""
Creates all the possible primers from sequences with ambiguous characters
"""
import itertools
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.Seq import Seq
# Create the output file path/name
filename = '{}unambiguousprimers.fasta'.format(self.path)
with open(filename, 'wb') as unambiguous:
for sample in self.runmetadata.samples:
# Create a list of all possible forward and reverse primers
ambiguouslist = self.extend_ambiguous_dna(sample.forward, sample.reverse)
# Create a list of tuples of all possible primer pairs
ambiguouspairs = list(itertools.product(*ambiguouslist))
for iterator, pair in enumerate(ambiguouspairs):
# Zip together the forward and reverse primers in :pair with 'F' and 'R', respectively
for item in zip(pair, ['F', 'R']):
# The definition line will be something like: stx1a_F_0
definitionline = '{}_{}_{}'.format(sample.name, item[1], iterator)
# Convert the string of the sequence to a Seq
sequence = Seq(item[0])
# Create a sequence record using BioPython
fasta = SeqRecord(sequence,
# Without this, the header will be improperly formatted
description='',
# Use >:definitionline as the header
id=definitionline)
# Use the SeqIO module to properly format the new sequence record
SeqIO.write(fasta, unambiguous, "fasta")
@staticmethod
def extend_ambiguous_dna(forward, reverse):
"""
Return list of all possible sequences given an ambiguous DNA input
from: https://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence
"""
from Bio import Seq
from itertools import product
d = Seq.IUPAC.IUPACData.ambiguous_dna_values
return [list(map("".join, product(*map(d.get, forward)))), list(map("".join, product(*map(d.get, reverse))))]
def __init__(self, args, pipelinecommit, startingtime, scriptpath):
"""
:param args:
:param pipelinecommit:
:param startingtime:
:param scriptpath:
"""
# Initialise variables
self.commit = str(pipelinecommit)
self.starttime = startingtime
self.homepath = scriptpath
# Define variables based on supplied arguments
self.args = args
# Forward and reverse primers (if supplied)
self.forward = args.forwardprimer
self.reverse = args.reverseprimer
self.path = os.path.join(args.path, '')
assert os.path.isdir(self.path), u'Output location is not a valid directory {0!r:s}'.format(self.path)
self.batch = False
self.primerfile = ''
# If primer sequences are supplied, don't look for a primer file. If one or more of the primers are missing,
# then attempt to try the primer file
if not self.forward or not self.reverse:
print 'One or more of the primers was not provided. Attempting to find and use a supplied primer file.'
self.primerfile = args.primerfile
assert os.path.isfile(self.primerfile), u'Cannot file primer file {0!r:s}'.format(self.primerfile)
self.batch = True
# If both primers are provided, set them to uppercase
if self.forward and self.reverse:
self.forward = self.forward.upper()
self.reverse = self.reverse.upper()
#
self.runmetadata = MetadataObject()
# Create the objects
self.objectifier()
# Run the degeneration
self.degenerate()
if __name__ == '__main__':
    import subprocess
    import time
    # Argument parser for user-inputted values, and a nifty help menu
    from argparse import ArgumentParser
    # Get the current commit of the pipeline from git
    # Extract the path of the current script from the full path + file name
    # NOTE(review): `os` is presumably provided by the star import from
    # accessoryFunctions at the top of the file — confirm.
    homepath = os.path.split(os.path.abspath(__file__))[0]
    # Find the commit of the script by running a command to change to the directory containing the script and run
    # a git command to return the short version of the commit hash
    commit = subprocess.Popen('cd {} && git rev-parse --short HEAD'.format(homepath),
                              shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
    # Parser for arguments
    parser = ArgumentParser(description='Output all possible primer pairs from primers with degenerate bases.'
                                        'You must provide either a file of all the primer pairs for a batch analysis'
                                        'OR the forward and reverse primers for a single analysis')
    # parser.add_argument('-v', '--version',
    #                     version='%(prog)s commit {}'.format(commit))
    parser.add_argument('path',
                        help='Specify input directory')
    parser.add_argument('-p', '--primerfile',
                        help='Specify path and name of text file with degenerate primers. The format of the file '
                             'should be: "primername1,forwardprimer1,reverseprimer1\n'
                             'primername2,forwardprimer2,reverseprimer2\n"')
    parser.add_argument('-f', '--forwardprimer',
                        help='Sequence of the forward primer')
    parser.add_argument('-r', '--reverseprimer',
                        help='Sequence of the reverse primer')
    # Get the arguments into an object
    arguments = parser.parse_args()
    # Define the start time
    start = time.time()
    # Run the script (all the work happens in DegeneratePrimers.__init__)
    DegeneratePrimers(arguments, commit, start, homepath)
    # Print a bold, green exit statement
    print '\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - start) + '\033[0m'
|
adamkoziol/vtyper
|
primerdegenerator.py
|
Python
|
mit
| 6,765
|
[
"Biopython"
] |
aac1e94f5ead80b5d49313e89ad560736e1cae2e6aaf94cfd44fe304359398d9
|
##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for easyconfig files.
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
import sys
import tempfile
from distutils.version import LooseVersion
from unittest import TestCase, TestLoader, main, skip
import easybuild.main as eb_main
import easybuild.tools.options as eboptions
from easybuild.base import fancylogger
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig.default import DEFAULT_CONFIG
from easybuild.framework.easyconfig.format.format import DEPENDENCY_PARAMETERS
from easybuild.framework.easyconfig.easyconfig import get_easyblock_class, letter_dir_for
from easybuild.framework.easyconfig.easyconfig import resolve_template
from easybuild.framework.easyconfig.parser import EasyConfigParser, fetch_parameters_from_easyconfig
from easybuild.framework.easyconfig.tools import check_sha256_checksums, dep_graph, get_paths_for, process_easyconfig
from easybuild.tools import config
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import GENERAL_CLASS, build_option
from easybuild.tools.filetools import change_dir, is_generic_easyblock, remove_file
from easybuild.tools.filetools import verify_checksum, which, write_file
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.modules import modules_tool
from easybuild.tools.py2vs3 import string_type, urlopen
from easybuild.tools.robot import check_conflicts, resolve_dependencies
from easybuild.tools.run import run_cmd
from easybuild.tools.options import set_tmpdir
from easybuild.tools.utilities import nub
# indicates whether all the single tests are OK,
# and that bigger tests (building dep graph, testing for conflicts, ...) can be run as well
# other than optimizing for time, this also helps to get around problems like http://bugs.python.org/issue10949
single_tests_ok = True
def is_pr():
    """Return true if run in a pull request CI"""
    # Travis sets $TRAVIS_PULL_REQUEST to the PR number when testing a PR;
    # any non-numeric value (e.g. 'false') means this is not a PR build.
    travis_match = re.match('^[0-9]+$', os.environ.get('TRAVIS_PULL_REQUEST', ''))
    if travis_match:
        return travis_match
    # GitHub Actions sets $GITHUB_EVENT_NAME to 'pull_request' when testing a PR
    return os.environ.get('GITHUB_EVENT_NAME') == 'pull_request'
def get_target_branch():
    """Return the target branch of a pull request"""
    # Prefer GitHub Actions' $GITHUB_BASE_REF and fall back to $TRAVIS_BRANCH;
    # the result is usually 'develop', but may be a release branch like '3.7.x'.
    for env_var in ('GITHUB_BASE_REF', 'TRAVIS_BRANCH'):
        branch = os.environ.get(env_var)
        if branch:
            return branch
    raise RuntimeError("Did not find a target branch")
def skip_if_not_pr_to_non_main_branch():
    """Decorator factory: run the test only for PRs targeting a non-'main' branch."""
    if is_pr():
        if get_target_branch() == "main":
            return skip("Not run for pull requests against main")
        # PR against a non-main branch: return an identity decorator
        return lambda func: func
    return skip("Only run for pull requests")
def get_eb_files_from_diff(diff_filter):
    """Return the files changed on HEAD relative to the current target branch"""
    target_branch = get_target_branch()

    # run the git commands from the top-level directory of the easyconfigs repository
    repo_topdir = os.path.dirname(os.path.dirname(get_paths_for('easyconfigs')[0]))
    prev_dir = change_dir(repo_topdir)

    # first determine the 'merge base' between target branch and PR branch,
    # cfr. https://git-scm.com/docs/git-merge-base
    out, ec = run_cmd("git merge-base %s HEAD" % target_branch, simple=False, log_ok=False)
    if ec == 0:
        merge_base = out.strip()
        print("Merge base for %s and HEAD: %s" % (target_branch, merge_base))
    else:
        # diffing against the target branch directly is a usable (if less precise) fallback
        msg = "Failed to determine merge base (ec: %s, output: '%s'), "
        msg += "falling back to specifying target branch %s"
        print(msg % (ec, out, target_branch))
        merge_base = target_branch

    # determine list of changed files using 'git diff' and merge base determined above
    cmd = "git diff --name-only --diff-filter=%s %s..HEAD --" % (diff_filter, merge_base)
    out, _ = run_cmd(cmd, simple=False)
    eb_files = [os.path.basename(f) for f in out.strip().split('\n') if f.endswith('.eb')]

    # restore original working directory before returning
    change_dir(prev_dir)

    return eb_files
class EasyConfigTest(TestCase):
"""Baseclass for easyconfig testcases."""
    @classmethod
    def setUpClass(cls):
        """Setup environment for all tests. Called once!"""
        # make sure that the EasyBuild installation is still known even if we purge an EB module
        if os.getenv('EB_SCRIPT_PATH') is None:
            eb_path = which('eb')
            if eb_path is not None:
                os.environ['EB_SCRIPT_PATH'] = eb_path
        # initialize configuration (required for e.g. default modules_tool setting)
        eb_go = eboptions.parse_options(args=[])  # Ignore cmdline args as those are meant for the unittest framework
        config.init(eb_go.options, eb_go.get_options_by_section('config'))
        # build options used for parsing/checking all easyconfigs in the tests below;
        # note that 'robot_path' points at the easyconfigs repository itself
        build_options = {
            'check_osdeps': False,
            'external_modules_metadata': {},
            'force': True,
            'local_var_naming_check': 'error',
            'optarch': 'test',
            'robot_path': get_paths_for("easyconfigs")[0],
            'silent': True,
            'suffix_modules_path': GENERAL_CLASS,
            'valid_module_classes': config.module_classes(),
            'valid_stops': [x[0] for x in EasyBlock.get_steps()],
        }
        config.init_build_options(build_options=build_options)
        set_tmpdir()
        # put dummy 'craype-test' module in place, which is required for parsing easyconfigs using Cray* toolchains
        cls.TMPDIR = tempfile.mkdtemp()
        os.environ['MODULEPATH'] = cls.TMPDIR
        write_file(os.path.join(cls.TMPDIR, 'craype-test'), '#%Module\n')
        log = fancylogger.getLogger("EasyConfigTest", fname=False)
        # make sure a logger is present for main
        eb_main._log = log
        # class-level caches shared by all tests (filled lazily, see properties below)
        cls._ordered_specs = None
        cls._parsed_easyconfigs = []
        cls._parsed_all_easyconfigs = False
        cls._changed_ecs = None  # ECs changed in a PR
    @classmethod
    def tearDownClass(cls):
        """Cleanup after running all tests: remove temporary module path created in setUpClass."""
        shutil.rmtree(cls.TMPDIR)
@classmethod
def parse_all_easyconfigs(cls):
"""Parse all easyconfigs."""
if cls._parsed_all_easyconfigs:
return
# all available easyconfig files
easyconfigs_path = get_paths_for("easyconfigs")[0]
specs = glob.glob('%s/*/*/*.eb' % easyconfigs_path)
parsed_specs = set(ec['spec'] for ec in cls._parsed_easyconfigs)
for spec in specs:
if spec not in parsed_specs:
cls._parsed_easyconfigs.extend(process_easyconfig(spec))
cls._parsed_all_easyconfigs = True
@classmethod
def resolve_all_dependencies(cls):
"""Resolve dependencies between easyconfigs"""
# Parse all easyconfigs if not done yet
cls.parse_all_easyconfigs()
# filter out external modules
for ec in cls._parsed_easyconfigs:
for dep in ec['dependencies'][:]:
if dep.get('external_module', False):
ec['dependencies'].remove(dep)
cls._ordered_specs = resolve_dependencies(
cls._parsed_easyconfigs, modules_tool(), retain_all_deps=True)
    def _get_changed_easyconfigs(self):
        """
        Gather all added or modified easyconfigs in the current PR,
        and cache the results on class attributes (_changed_ecs_filenames,
        _added_ecs_filenames, _changed_ecs).
        """
        # get list of changed easyconfigs
        changed_ecs_filenames = get_eb_files_from_diff(diff_filter='M')
        added_ecs_filenames = get_eb_files_from_diff(diff_filter='A')
        if changed_ecs_filenames:
            print("\nList of changed easyconfig files in this PR:\n\t%s" % '\n\t'.join(changed_ecs_filenames))
        if added_ecs_filenames:
            print("\nList of added easyconfig files in this PR:\n\t%s" % '\n\t'.join(added_ecs_filenames))
        # cache on the class (not the instance), so all test methods share the result
        EasyConfigTest._changed_ecs_filenames = changed_ecs_filenames
        EasyConfigTest._added_ecs_filenames = added_ecs_filenames
        # grab parsed easyconfigs for changed easyconfig files
        changed_ecs = []
        for ec_fn in changed_ecs_filenames + added_ecs_filenames:
            match = None
            # look up the parsed easyconfig by filename (parses all easyconfigs on first access)
            for ec in self.parsed_easyconfigs:
                if os.path.basename(ec['spec']) == ec_fn:
                    match = ec['ec']
                    break
            if match:
                changed_ecs.append(match)
            else:
                # if no easyconfig is found, it's possible some archived easyconfigs were touched in the PR...
                # so as a last resort, try to find the easyconfig file in __archive__
                easyconfigs_path = get_paths_for("easyconfigs")[0]
                specs = glob.glob('%s/__archive__/*/*/%s' % (easyconfigs_path, ec_fn))
                if len(specs) == 1:
                    ec = process_easyconfig(specs[0])[0]
                    changed_ecs.append(ec['ec'])
                else:
                    # zero or multiple matches in the archive: give up with an explicit error
                    raise RuntimeError("Failed to find parsed easyconfig for %s"
                                       " (and could not isolate it in easyconfigs archive either)" % ec_fn)
        EasyConfigTest._changed_ecs = changed_ecs
    @property
    def parsed_easyconfigs(self):
        """All parsed easyconfigs (parsed lazily on first access)."""
        # parse all easyconfigs if they haven't been already
        EasyConfigTest.parse_all_easyconfigs()
        return EasyConfigTest._parsed_easyconfigs
    @property
    def ordered_specs(self):
        """All parsed easyconfigs, ordered by resolved dependencies (resolved lazily on first access)."""
        # Resolve dependencies if not done
        if EasyConfigTest._ordered_specs is None:
            EasyConfigTest.resolve_all_dependencies()
        return EasyConfigTest._ordered_specs
    @property
    def changed_ecs_filenames(self):
        """Filenames of easyconfigs modified in the current PR (determined lazily via git diff)."""
        if EasyConfigTest._changed_ecs is None:
            self._get_changed_easyconfigs()
        return EasyConfigTest._changed_ecs_filenames
    @property
    def added_ecs_filenames(self):
        """Filenames of easyconfigs added in the current PR (determined lazily via git diff)."""
        if EasyConfigTest._changed_ecs is None:
            self._get_changed_easyconfigs()
        return EasyConfigTest._added_ecs_filenames
    @property
    def changed_ecs(self):
        """Parsed easyconfigs that were added or modified in the current PR (determined lazily)."""
        if EasyConfigTest._changed_ecs is None:
            self._get_changed_easyconfigs()
        return EasyConfigTest._changed_ecs
def test_dep_graph(self):
"""Unit test that builds a full dependency graph."""
# pygraph dependencies required for constructing dependency graph are not available prior to Python 2.6
if LooseVersion(sys.version) >= LooseVersion('2.6') and single_tests_ok:
# temporary file for dep graph
(hn, fn) = tempfile.mkstemp(suffix='.dot')
os.close(hn)
dep_graph(fn, self.ordered_specs)
remove_file(fn)
else:
print("(skipped dep graph test)")
def test_conflicts(self):
"""Check whether any conflicts occur in software dependency graphs."""
if not single_tests_ok:
print("(skipped conflicts test)")
return
self.assertFalse(check_conflicts(self.ordered_specs, modules_tool(), check_inter_ec_conflicts=False),
"No conflicts detected")
def check_dep_vars(self, gen, dep, dep_vars):
"""Check whether available variants of a particular dependency are acceptable or not."""
# 'guilty' until proven 'innocent'
res = False
# filter out wrapped Java versions
# i.e. if the version of one is a prefix of the version of the other one (e.g. 1.8 & 1.8.0_181)
if dep == 'Java':
dep_vars_to_check = sorted(dep_vars.keys())
retained_dep_vars = []
while dep_vars_to_check:
dep_var = dep_vars_to_check.pop()
dep_var_version = dep_var.split(';')[0]
# remove dep vars wrapped by current dep var
dep_vars_to_check = [x for x in dep_vars_to_check if not x.startswith(dep_var_version + '.')]
retained_dep_vars = [x for x in retained_dep_vars if not x.startswith(dep_var_version + '.')]
retained_dep_vars.append(dep_var)
for key in list(dep_vars.keys()):
if key not in retained_dep_vars:
del dep_vars[key]
version_regex = re.compile('^version: (?P<version>[^;]+);')
# filter out binutils with empty versionsuffix which is used to build toolchain compiler
if dep == 'binutils' and len(dep_vars) > 1:
empty_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: ')]
if len(empty_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != empty_vsuff_vars[0])
# multiple variants of HTSlib is OK as long as they are deps for a matching version of BCFtools;
# same goes for WRF and WPS; Gurobi and Rgurobi
for dep_name, parent_name in [('HTSlib', 'BCFtools'), ('WRF', 'WPS'), ('Gurobi', 'Rgurobi')]:
if dep == dep_name and len(dep_vars) > 1:
for key in list(dep_vars):
ecs = dep_vars[key]
# filter out dep variants that are only used as dependency for parent with same version
dep_ver = version_regex.search(key).group('version')
if all(ec.startswith('%s-%s-' % (parent_name, dep_ver)) for ec in ecs) and len(dep_vars) > 1:
dep_vars.pop(key)
# multiple versions of Boost is OK as long as they are deps for a matching Boost.Python
if dep == 'Boost' and len(dep_vars) > 1:
for key in list(dep_vars):
ecs = dep_vars[key]
# filter out Boost variants that are only used as dependency for Boost.Python with same version
boost_ver = version_regex.search(key).group('version')
if all(ec.startswith('Boost.Python-%s-' % boost_ver) for ec in ecs):
dep_vars.pop(key)
# filter out Perl with -minimal versionsuffix which are only used in makeinfo-minimal
if dep == 'Perl':
minimal_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -minimal')]
if len(minimal_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != minimal_vsuff_vars[0])
# filter out FFTW and imkl with -serial versionsuffix which are used in non-MPI subtoolchains
if dep in ['FFTW', 'imkl']:
serial_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -serial')]
if len(serial_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != serial_vsuff_vars[0])
# filter out BLIS and libFLAME with -amd versionsuffix
# (AMD forks, used in gobff/*-amd toolchains)
if dep in ['BLIS', 'libFLAME']:
amd_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -amd')]
if len(amd_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != amd_vsuff_vars[0])
# filter out ScaLAPACK with -BLIS-* versionsuffix, used in goblf toolchain
if dep == 'ScaLAPACK':
blis_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -BLIS-' in v]
if len(blis_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != blis_vsuff_vars[0])
if dep == 'ScaLAPACK':
# filter out ScaLAPACK with -bf versionsuffix, used in gobff toolchain
bf_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -bf' in v]
if len(bf_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != bf_vsuff_vars[0])
# filter out ScaLAPACK with -bl versionsuffix, used in goblf toolchain
bl_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -bl' in v]
if len(bl_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != bl_vsuff_vars[0])
# filter out HDF5 with -serial versionsuffix which is used in HDF5 for Python (h5py)
if dep in ['HDF5']:
serial_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -serial')]
if len(serial_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != serial_vsuff_vars[0])
# for some dependencies, we allow exceptions for software that depends on a particular version,
# as long as that's indicated by the versionsuffix
versionsuffix_deps = ['ASE', 'Boost', 'CUDAcore', 'Java', 'Lua',
'PLUMED', 'PyTorch', 'R', 'TensorFlow']
if dep in versionsuffix_deps and len(dep_vars) > 1:
# check for '-CUDA-*' versionsuffix for CUDAcore dependency
if dep == 'CUDAcore':
dep = 'CUDA'
for key in list(dep_vars):
dep_ver = version_regex.search(key).group('version')
# use version of Java wrapper rather than full Java version
if dep == 'Java':
dep_ver = '.'.join(dep_ver.split('.')[:2])
# filter out dep version if all easyconfig filenames using it include specific dep version
if all(re.search('-%s-%s' % (dep, dep_ver), v) for v in dep_vars[key]):
dep_vars.pop(key)
# always retain at least one dep variant
if len(dep_vars) == 1:
break
# filter R dep for a specific version of Python 2.x
if dep == 'R' and len(dep_vars) > 1:
for key in list(dep_vars):
if '; versionsuffix: -Python-2' in key:
dep_vars.pop(key)
# always retain at least one variant
if len(dep_vars) == 1:
break
# filter out variants that are specific to a particular version of CUDA
cuda_dep_vars = [v for v in dep_vars.keys() if '-CUDA' in v]
if len(dep_vars) >= len(cuda_dep_vars) and len(dep_vars) > 1:
for key in list(dep_vars):
if re.search('; versionsuffix: .*-CUDA-[0-9.]+', key):
dep_vars.pop(key)
# always retain at least one dep variant
if len(dep_vars) == 1:
break
# some software packages require a specific (older/newer) version of a particular dependency
old_dep_versions = {
# EMAN2 2.3 requires Boost(.Python) 1.64.0
'Boost': [('1.64.0;', [r'Boost.Python-1\.64\.0-', r'EMAN2-2\.3-'])],
'Boost.Python': [('1.64.0;', [r'EMAN2-2\.3-'])],
# VMTK 1.4.x requires ITK 4.13.x
'ITK': [(r'4\.13\.', [r'VMTK-1\.4\.'])],
# Kraken 1.x requires Jellyfish 1.x (Roary & metaWRAP depend on Kraken 1.x)
'Jellyfish': [(r'1\.', [r'Kraken-1\.', r'Roary-3\.12\.0', r'metaWRAP-1\.2'])],
# Libint 1.1.6 is required by older CP2K versions
'Libint': [(r'1\.1\.6', [r'CP2K-[3-6]'])],
# libxc 2.x or 3.x is required by ABINIT, AtomPAW, CP2K, GPAW, horton, PySCF, WIEN2k
# (Qiskit depends on PySCF), Elk 7.x requires libxc >= 5
'libxc': [
(r'[23]\.', [r'ABINIT-', r'AtomPAW-', r'CP2K-', r'GPAW-', r'horton-',
r'PySCF-', r'Qiskit-', r'WIEN2k-']),
(r'5\.', [r'Elk-']),
],
# some software depends on numba, which typically requires an older LLVM;
# this includes BirdNET, cell2location, cryoDRGN, librosa, PyOD, Python-Geometric, scVelo, scanpy
'LLVM': [
# numba 0.47.x requires LLVM 7.x or 8.x (see https://github.com/numba/llvmlite#compatibility)
(r'8\.', [r'numba-0\.47\.0-', r'librosa-0\.7\.2-', r'BirdNET-20201214-',
r'scVelo-0\.1\.24-', r'PyTorch-Geometric-1\.[346]\.[23]']),
(r'10\.0\.1', [r'cell2location-0\.05-alpha-', r'cryoDRGN-0\.3\.2-', r'loompy-3\.0\.6-',
r'numba-0\.52\.0-', r'PyOD-0\.8\.7-', r'PyTorch-Geometric-1\.6\.3',
r'scanpy-1\.7\.2-', r'umap-learn-0\.4\.6-']),
],
'Lua': [
# SimpleITK 2.1.0 requires Lua 5.3.x, MedPy and nnU-Net depend on SimpleITK
(r'5\.3\.5', [r'nnU-Net-1\.7\.0-', r'MedPy-0\.4\.0-', r'SimpleITK-2\.1\.0-']),
],
# TensorFlow 2.5+ requires a more recent NCCL than version 2.4.8 used in 2019b generation;
# Horovod depends on TensorFlow, so same exception required there
'NCCL': [(r'2\.11\.4', [r'TensorFlow-2\.[5-9]\.', r'Horovod-0\.2[2-9]'])],
# rampart requires nodejs > 10, artic-ncov2019 requires rampart
'nodejs': [('12.16.1', ['rampart-1.2.0rc3-', 'artic-ncov2019-2020.04.13'])],
# some software depends on an older numba;
# this includes BirdNET, cell2location, cryoDRGN, librosa, PyOD, Python-Geometric, scVelo, scanpy
'numba': [
(r'0\.52\.0', [r'cell2location-0\.05-alpha-', r'cryoDRGN-0\.3\.2-', r'loompy-3\.0\.6-',
r'PyOD-0\.8\.7-', r'PyTorch-Geometric-1\.6\.3', r'scanpy-1\.7\.2-',
r'umap-learn-0\.4\.6-']),
],
# OPERA requires SAMtools 0.x
'SAMtools': [(r'0\.', [r'ChimPipe-0\.9\.5', r'Cufflinks-2\.2\.1', r'OPERA-2\.0\.6',
r'CGmapTools-0\.1\.2', r'BatMeth2-2\.1'])],
# NanoPlot, NanoComp use an older version of Seaborn
'Seaborn': [(r'0\.10\.1', [r'NanoComp-1\.13\.1-', r'NanoPlot-1\.33\.0-'])],
# Shasta requires spoa 3.x
'spoa': [(r'3\.4\.0', [r'Shasta-0\.8\.0-'])],
'TensorFlow': [
# medaka 0.11.4/0.12.0 requires recent TensorFlow <= 1.14 (and Python 3.6),
# artic-ncov2019 requires medaka
('1.13.1;', ['medaka-0.11.4-', 'medaka-0.12.0-', 'artic-ncov2019-2020.04.13']),
# medaka 1.1.* and 1.2.* requires TensorFlow 2.2.0
# (while other 2019b easyconfigs use TensorFlow 2.1.0 as dep);
# TensorFlow 2.2.0 is also used as a dep for Horovod 0.19.5;
# decona 0.1.2 and NGSpeciesID 0.1.1.1 depend on medaka 1.1.3
('2.2.0;', ['medaka-1.2.[0]-', 'medaka-1.1.[13]-', 'Horovod-0.19.5-', 'decona-0.1.2-',
'NGSpeciesID-0.1.1.1-']),
# medaka 1.4.3 (foss/2019b) depends on TensorFlow 2.2.2
('2.2.2;', ['medaka-1.4.3-']),
# medaka 1.4.3 (foss/2020b) depends on TensorFlow 2.2.3; longread_umi and artic depend on medaka
('2.2.3;', ['medaka-1.4.3-', 'artic-ncov2019-2021.06.24-', 'longread_umi-0.3.2-']),
],
# for the sake of backwards compatibility, keep UCX-CUDA v1.11.0 which depends on UCX v1.11.0
# (for 2021b, UCX was updated to v1.11.2)
'UCX': [('1.11.0;', ['UCX-CUDA-1.11.0-'])],
# medaka 1.1.*, 1.2.*, 1.4.* requires Pysam 0.16.0.1,
# which is newer than what others use as dependency w.r.t. Pysam version in 2019b generation;
# decona 0.1.2 and NGSpeciesID 0.1.1.1 depend on medaka 1.1.3
'Pysam': [('0.16.0.1;', ['medaka-1.2.[0]-', 'medaka-1.1.[13]-', 'medaka-1.4.3-', 'decona-0.1.2-',
'NGSpeciesID-0.1.1.1-'])],
# UShER requires tbb-2020.3 as newer versions will not build
'tbb': [('2020.3', ['UShER-0.5.0-'])],
}
if dep in old_dep_versions and len(dep_vars) > 1:
for key in list(dep_vars):
for version_pattern, parents in old_dep_versions[dep]:
# filter out known old dependency versions
if re.search('^version: %s' % version_pattern, key):
# only filter if the easyconfig using this dep variants is known
if all(any(re.search(p, x) for p in parents) for x in dep_vars[key]):
dep_vars.pop(key)
# filter out ELSI variants with -PEXSI suffix
if dep == 'ELSI' and len(dep_vars) > 1:
pexsi_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -PEXSI')]
if len(pexsi_vsuff_vars) == 1:
dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != pexsi_vsuff_vars[0])
# only single variant is always OK
if len(dep_vars) == 1:
res = True
elif len(dep_vars) == 2 and dep in ['Python', 'Tkinter']:
# for Python & Tkinter, it's OK to have on 2.x and one 3.x version
v2_dep_vars = [x for x in dep_vars.keys() if x.startswith('version: 2.')]
v3_dep_vars = [x for x in dep_vars.keys() if x.startswith('version: 3.')]
if len(v2_dep_vars) == 1 and len(v3_dep_vars) == 1:
res = True
# two variants is OK if one is for Python 2.x and the other is for Python 3.x (based on versionsuffix)
elif len(dep_vars) == 2:
py2_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-2.' in x]
py3_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-3.' in x]
if len(py2_dep_vars) == 1 and len(py3_dep_vars) == 1:
res = True
# for recent generations, there's no versionsuffix anymore for Python 3,
# but we still allow variants depending on Python 2.x + 3.x
is_recent_gen = False
full_toolchain_regex = re.compile(r'^20[1-9][0-9][ab]$')
gcc_toolchain_regex = re.compile(r'^GCC(core)?-[0-9]?[0-9]\.[0-9]$')
if full_toolchain_regex.match(gen):
is_recent_gen = LooseVersion(gen) >= LooseVersion('2020b')
elif gcc_toolchain_regex.match(gen):
genver = gen.split('-', 1)[1]
is_recent_gen = LooseVersion(genver) >= LooseVersion('10.2')
else:
raise EasyBuildError("Unkown type of toolchain generation: %s" % gen)
if is_recent_gen:
py2_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-2.' in x]
py3_dep_vars = [x for x in dep_vars.keys() if x.strip().endswith('; versionsuffix:')]
if len(py2_dep_vars) == 1 and len(py3_dep_vars) == 1:
res = True
return res
    def test_check_dep_vars(self):
        """Test check_dep_vars utility method with representative variant dicts (no real easyconfigs involved)."""
        # one single dep version: OK
        self.assertTrue(self.check_dep_vars('2019b', 'testdep', {
            'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb', 'bar-4.5.6.eb'],
        }))
        self.assertTrue(self.check_dep_vars('2019b', 'testdep', {
            'version: 1.2.3; versionsuffix: -test': ['foo-1.2.3.eb', 'bar-4.5.6.eb'],
        }))

        # two or more dep versions (no special case: not OK)
        self.assertFalse(self.check_dep_vars('2019b', 'testdep', {
            'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 4.5.6; versionsuffix:': ['bar-4.5.6.eb'],
        }))
        self.assertFalse(self.check_dep_vars('2019b', 'testdep', {
            'version: 0.0; versionsuffix:': ['foobar-0.0.eb'],
            'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 4.5.6; versionsuffix:': ['bar-4.5.6.eb'],
        }))

        # Java is a special case, with wrapped Java versions (wrapper version is a prefix of the full version)
        self.assertTrue(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        }))
        # two Java wrappers is not OK
        self.assertFalse(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
        }))
        # OK to have two or more wrappers if versionsuffix is used to indicate exception
        self.assertTrue(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        }))
        # versionsuffix must be there for all easyconfigs to indicate exception
        self.assertFalse(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb', 'bar-4.5.6.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb', 'bar-4.5.6.eb'],
        }))
        self.assertTrue(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
            'version: 12.1.6; versionsuffix:': ['foobar-0.0-Java-12.eb'],
            'version: 12; versionsuffix:': ['foobar-0.0-Java-12.eb'],
        }))

        # strange situation: odd number of Java versions
        # not OK: two Java wrappers (and no versionsuffix to indicate exception)
        self.assertFalse(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
        }))
        # OK because of -Java-11 versionsuffix
        self.assertTrue(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        }))
        # not OK: two Java wrappers (and no versionsuffix to indicate exception)
        self.assertFalse(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
        }))
        # OK because of -Java-11 versionsuffix
        self.assertTrue(self.check_dep_vars('2019b', 'Java', {
            'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
            'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        }))

        # two different versions of Boost is not OK
        self.assertFalse(self.check_dep_vars('2019b', 'Boost', {
            'version: 1.64.0; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
        }))

        # a different Boost version that is only used as dependency for a matching Boost.Python is fine
        self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
            'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
            'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
        }))
        self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
            'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
            'version: 1.66.0; versionsuffix:': ['Boost.Python-1.66.0-gompi-2019a.eb'],
            'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
        }))
        self.assertFalse(self.check_dep_vars('2019a', 'Boost', {
            'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
            'version: 1.66.0; versionsuffix:': ['foo-1.2.3.eb'],
            'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
        }))

        self.assertTrue(self.check_dep_vars('2018a', 'Boost', {
            'version: 1.63.0; versionsuffix: -Python-2.7.14': ['EMAN2-2.21a-foss-2018a-Python-2.7.14-Boost-1.63.0.eb'],
            'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2018a.eb'],
            'version: 1.66.0; versionsuffix:': ['BLAST+-2.7.1-foss-2018a.eb'],
        }))

        self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
            'version: 1.64.0; versionsuffix:': [
                'Boost.Python-1.64.0-gompi-2019a.eb',
                'EMAN2-2.3-foss-2019a-Python-2.7.15.eb',
            ],
            'version: 1.70.0; versionsuffix:': [
                'BLAST+-2.9.0-gompi-2019a.eb',
                'Boost.Python-1.70.0-gompi-2019a.eb',
            ],
        }))

        # two variants is OK, if they're for Python 2.x and 3.x
        self.assertTrue(self.check_dep_vars('2020a', 'Python', {
            'version: 2.7.18; versionsuffix:': ['SciPy-bundle-2020.03-foss-2020a-Python-2.7.18.eb'],
            'version: 3.8.2; versionsuffix:': ['SciPy-bundle-2020.03-foss-2020a-Python-3.8.2.eb'],
        }))

        self.assertTrue(self.check_dep_vars('2020a', 'SciPy-bundle', {
            'version: 2020.03; versionsuffix: -Python-2.7.18': ['matplotlib-3.2.1-foss-2020a-Python-2.7.18.eb'],
            'version: 2020.03; versionsuffix: -Python-3.8.2': ['matplotlib-3.2.1-foss-2020a-Python-3.8.2.eb'],
        }))

        # for recent easyconfig generations (2020b+, GCC(core) 10.2+), there's no versionsuffix anymore for Python 3
        self.assertTrue(self.check_dep_vars('2020b', 'Python', {
            'version: 2.7.18; versionsuffix:': ['SciPy-bundle-2020.11-foss-2020b-Python-2.7.18.eb'],
            'version: 3.8.6; versionsuffix:': ['SciPy-bundle-2020.11-foss-2020b.eb'],
        }))
        self.assertTrue(self.check_dep_vars('GCCcore-10.2', 'PyYAML', {
            'version: 5.3.1; versionsuffix:': ['IPython-7.18.1-GCCcore-10.2.0.eb'],
            'version: 5.3.1; versionsuffix: -Python-2.7.18': ['IPython-7.18.1-GCCcore-10.2.0-Python-2.7.18.eb'],
        }))
        self.assertTrue(self.check_dep_vars('2020b', 'SciPy-bundle', {
            'version: 2020.11; versionsuffix: -Python-2.7.18': ['matplotlib-3.3.3-foss-2020b-Python-2.7.18.eb'],
            'version: 2020.11; versionsuffix:': ['matplotlib-3.3.3-foss-2020b.eb'],
        }))

        # not allowed for older generations (foss/intel 2020a or older, GCC(core) 10.1.0 or older)
        self.assertFalse(self.check_dep_vars('2020a', 'SciPy-bundle', {
            'version: 2020.03; versionsuffix: -Python-2.7.18': ['matplotlib-3.2.1-foss-2020a-Python-2.7.18.eb'],
            'version: 2020.03; versionsuffix:': ['matplotlib-3.2.1-foss-2020a.eb'],
        }))
    def test_dep_versions_per_toolchain_generation(self):
        """
        Check whether there's only one dependency version per toolchain generation actively used.
        This is enforced to try and limit the chance of running into conflicts when multiple modules built with
        the same toolchain are loaded together.
        """
        # map full module names to parsed easyconfigs, for fast dependency lookup below
        ecs_by_full_mod_name = dict((ec['full_mod_name'], ec) for ec in self.parsed_easyconfigs)
        if len(ecs_by_full_mod_name) != len(self.parsed_easyconfigs):
            self.fail('Easyconfigs with duplicate full_mod_name found')

        # Cache already determined dependencies
        ec_to_deps = dict()

        def get_deps_for(ec):
            """Get list of (direct) dependencies for specified easyconfig."""
            # recursively collects transitive dependencies as well, memoized via ec_to_deps
            ec_mod_name = ec['full_mod_name']
            deps = ec_to_deps.get(ec_mod_name)
            if deps is None:
                deps = []
                for dep in ec['ec']['dependencies']:
                    dep_mod_name = dep['full_mod_name']
                    deps.append((dep['name'], dep['version'], dep['versionsuffix'], dep_mod_name))
                    # Note: Raises KeyError if dep not found
                    res = ecs_by_full_mod_name[dep_mod_name]
                    deps.extend(get_deps_for(res))
                ec_to_deps[ec_mod_name] = deps

            return deps

        # some software also follows <year>{a,b} versioning scheme,
        # which throws off the pattern matching done below for toolchain versions
        false_positives_regex = re.compile('^MATLAB-Engine-20[0-9][0-9][ab]')

        # restrict to checking dependencies of easyconfigs using common toolchains (start with 2018a)
        # and GCCcore subtoolchain for common toolchains, starting with GCCcore 7.x
        for pattern in ['20(1[89]|[2-9][0-9])[ab]', r'GCCcore-([7-9]|[1-9][0-9])\.[0-9]']:
            all_deps = {}
            regex = re.compile(r'^.*-(?P<tc_gen>%s).*\.eb$' % pattern)

            # collect variants for all dependencies of easyconfigs that use a toolchain that matches
            for ec in self.ordered_specs:
                ec_file = os.path.basename(ec['spec'])

                # take into account software which also follows a <year>{a,b} versioning scheme
                ec_file = false_positives_regex.sub('', ec_file)

                res = regex.match(ec_file)
                if res:
                    tc_gen = res.group('tc_gen')
                    all_deps_tc_gen = all_deps.setdefault(tc_gen, {})
                    for dep_name, dep_ver, dep_versuff, dep_mod_name in get_deps_for(ec):
                        dep_variants = all_deps_tc_gen.setdefault(dep_name, {})
                        # a variant is defined by version + versionsuffix
                        variant = "version: %s; versionsuffix: %s" % (dep_ver, dep_versuff)
                        # keep track of which easyconfig this is a dependency
                        dep_variants.setdefault(variant, set()).add(ec_file)

            # check which dependencies have more than 1 variant
            multi_dep_vars, multi_dep_vars_msg = [], ''
            for tc_gen in sorted(all_deps.keys()):
                for dep in sorted(all_deps[tc_gen].keys()):
                    dep_vars = all_deps[tc_gen][dep]
                    # check_dep_vars applies the whitelisted exceptions (Java wrappers, Python 2/3, ...)
                    if not self.check_dep_vars(tc_gen, dep, dep_vars):
                        multi_dep_vars.append(dep)
                        multi_dep_vars_msg += "\nfound %s variants of '%s' dependency " % (len(dep_vars), dep)
                        multi_dep_vars_msg += "in easyconfigs using '%s' toolchain generation\n* " % tc_gen
                        multi_dep_vars_msg += '\n* '.join("%s as dep for %s" % v for v in sorted(dep_vars.items()))
                        multi_dep_vars_msg += '\n'

            error_msg = "No multi-variant deps found for '%s' easyconfigs:\n%s" % (regex.pattern, multi_dep_vars_msg)
            self.assertFalse(multi_dep_vars, error_msg)
def test_sanity_check_paths(self):
"""Make sure specified sanity check paths adher to the requirements."""
for ec in self.parsed_easyconfigs:
ec_scp = ec['ec']['sanity_check_paths']
if ec_scp != {}:
# if sanity_check_paths is specified (i.e., non-default), it must adher to the requirements
# both 'files' and 'dirs' keys, both with list values and with at least one a non-empty list
error_msg = "sanity_check_paths for %s does not meet requirements: %s" % (ec['spec'], ec_scp)
self.assertEqual(sorted(ec_scp.keys()), ['dirs', 'files'], error_msg)
self.assertTrue(isinstance(ec_scp['dirs'], list), error_msg)
self.assertTrue(isinstance(ec_scp['files'], list), error_msg)
self.assertTrue(ec_scp['dirs'] or ec_scp['files'], error_msg)
def test_r_libs_site_env_var(self):
"""Make sure $R_LIBS_SITE is being updated, rather than $R_LIBS."""
# cfr. https://github.com/easybuilders/easybuild-easyblocks/pull/2326
r_libs_ecs = []
for ec in self.parsed_easyconfigs:
for key in ('modextrapaths', 'modextravars'):
if 'R_LIBS' in ec['ec'][key]:
r_libs_ecs.append(ec['spec'])
error_msg = "%d easyconfigs found which set $R_LIBS, should be $R_LIBS_SITE: %s"
self.assertEqual(r_libs_ecs, [], error_msg % (len(r_libs_ecs), ', '.join(r_libs_ecs)))
def test_easyconfig_locations(self):
"""Make sure all easyconfigs files are in the right location."""
easyconfig_dirs_regex = re.compile(r'/easybuild/easyconfigs/[0a-z]/[^/]+$')
topdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
for (dirpath, _, filenames) in os.walk(topdir):
# ignore git/svn dirs & archived easyconfigs
if '/.git/' in dirpath or '/.svn/' in dirpath or '__archive__' in dirpath:
continue
# check whether list of .eb files is non-empty
easyconfig_files = [fn for fn in filenames if fn.endswith('eb')]
if easyconfig_files:
# check whether path matches required pattern
if not easyconfig_dirs_regex.search(dirpath):
# only exception: TEMPLATE.eb
if not (dirpath.endswith('/easybuild/easyconfigs') and filenames == ['TEMPLATE.eb']):
self.assertTrue(False, "List of easyconfig files in %s is empty: %s" % (dirpath, filenames))
@skip_if_not_pr_to_non_main_branch()
def test_pr_sha256_checksums(self):
"""Make sure changed easyconfigs have SHA256 checksums in place."""
# list of software for which checksums can not be required,
# e.g. because 'source' files need to be constructed manually
whitelist = [
'Kent_tools-*',
'MATLAB-*',
'OCaml-*',
'OpenFOAM-Extend-4.1-*',
# sources for old versions of Bioconductor packages are no longer available,
# so not worth adding checksums for at this point
'R-bundle-Bioconductor-3.[2-5]',
]
# the check_sha256_checksums function (again) creates an EasyBlock instance
# for easyconfigs using the Bundle easyblock, this is a problem because the 'sources' easyconfig parameter
# is updated in place (sources for components are added to the 'parent' sources) in Bundle's __init__;
# therefore, we need to reset 'sources' to an empty list here if Bundle is used...
# likewise for 'patches' and 'checksums'
for ec in self.changed_ecs:
if ec['easyblock'] in ['Bundle', 'PythonBundle', 'EB_OpenSSL_wrapper'] or ec['name'] in ['Clang-AOMP']:
ec['sources'] = []
ec['patches'] = []
ec['checksums'] = []
# filter out deprecated easyconfigs
retained_changed_ecs = []
for ec in self.changed_ecs:
if not ec['deprecated']:
retained_changed_ecs.append(ec)
checksum_issues = check_sha256_checksums(retained_changed_ecs, whitelist=whitelist)
self.assertTrue(len(checksum_issues) == 0, "No checksum issues:\n%s" % '\n'.join(checksum_issues))
    @skip_if_not_pr_to_non_main_branch()
    def test_pr_python_packages(self):
        """Several checks for easyconfigs that install (bundles of) Python packages."""
        # These packages do not support installation with 'pip'
        whitelist_pip = [
            r'ESMPy-.*',
            r'MATLAB-Engine-.*',
            r'Meld-.*',
            r'PyTorch-.*',
        ]
        # easyconfigs that are exempt from the "pip check must be enabled" requirement below
        whitelist_pip_check = [
            r'Mako-1.0.4.*Python-2.7.12.*',
            # no pip 9.x or newer for configparser easyconfigs using a 2016a or 2016b toolchain
            r'configparser-3.5.0.*-2016[ab].*',
            # mympirun is installed with system Python, pip may not be installed for system Python
            r'vsc-mympirun.*',
        ]
        failing_checks = []
        # default value for 'source_urls' as defined by the PythonPackage easyblock,
        # used to detect easyconfigs that redundantly repeat the default
        python_default_urls = PythonPackage.extra_options()['source_urls'][0]
        for ec in self.changed_ecs:
            # templating is disabled so raw parameter values (with %(...)s intact) are compared
            with ec.disable_templating():
                ec_fn = os.path.basename(ec.path)
                easyblock = ec.get('easyblock')
                exts_defaultclass = ec.get('exts_defaultclass')
                exts_default_options = ec.get('exts_default_options', {})
                download_dep_fail = ec.get('download_dep_fail')
                exts_download_dep_fail = ec.get('exts_download_dep_fail')
                use_pip = ec.get('use_pip')
                if use_pip is None:
                    use_pip = exts_default_options.get('use_pip')
                # only easyconfig parameters as they are defined in the easyconfig file,
                # does *not* include other easyconfig parameters with their default value!
                pure_ec = ec.parser.get_config_dict()
                # download_dep_fail should be set when using PythonPackage
                if easyblock == 'PythonPackage':
                    if download_dep_fail is None:
                        failing_checks.append("'download_dep_fail' should be set in %s" % ec_fn)
                    if pure_ec.get('source_urls') == python_default_urls:
                        failing_checks.append("'source_urls' should not be defined when using the default value "
                                              "in %s" % ec_fn)
                # use_pip should be set when using PythonPackage or PythonBundle (except for whitelisted easyconfigs)
                if easyblock in ['PythonBundle', 'PythonPackage']:
                    if use_pip is None and not any(re.match(regex, ec_fn) for regex in whitelist_pip):
                        failing_checks.append("'use_pip' should be set in %s" % ec_fn)
                # download_dep_fail is enabled automatically in PythonBundle easyblock, so shouldn't be set
                if easyblock == 'PythonBundle':
                    if download_dep_fail or exts_download_dep_fail:
                        fail = "'*download_dep_fail' should not be set in %s since PythonBundle easyblock is used" % ec_fn
                        failing_checks.append(fail)
                    if pure_ec.get('exts_default_options', {}).get('source_urls') == python_default_urls:
                        failing_checks.append("'source_urls' should not be defined in exts_default_options when using "
                                              "the default value in %s" % ec_fn)
                elif exts_defaultclass == 'PythonPackage':
                    # bundle of Python packages should use PythonBundle
                    if easyblock == 'Bundle':
                        fail = "'PythonBundle' easyblock should be used for bundle of Python packages in %s" % ec_fn
                        failing_checks.append(fail)
                    else:
                        # both download_dep_fail and use_pip should be set via exts_default_options
                        # when installing Python packages as extensions
                        for key in ['download_dep_fail', 'use_pip']:
                            if exts_default_options.get(key) is None:
                                failing_checks.append("'%s' should be set in exts_default_options in %s" % (key, ec_fn))
                # if Python is a dependency, that should be reflected in the versionsuffix
                # Tkinter is an exception, since its version always matches the Python version anyway
                # Python 3.8.6 and later are also excluded, as we consider python 3 the default python
                # Also whitelist some updated versions of Amber
                whitelist_python_suffix = [
                    'Amber-16-*-2018b-AmberTools-17-patchlevel-10-15.eb',
                    'Amber-16-intel-2017b-AmberTools-17-patchlevel-8-12.eb',
                    'R-keras-2.1.6-foss-2018a-R-3.4.4.eb',
                ]
                whitelisted = any(re.match(regex, ec_fn) for regex in whitelist_python_suffix)
                # any Python dependency older than 3.8.6 triggers the versionsuffix requirement
                has_python_dep = any(LooseVersion(dep['version']) < LooseVersion('3.8.6')
                                     for dep in ec['dependencies'] if dep['name'] == 'Python')
                if has_python_dep and ec.name != 'Tkinter' and not whitelisted:
                    if not re.search(r'-Python-[23]\.[0-9]+\.[0-9]+', ec['versionsuffix']):
                        # '%%' is used so the literal '%(pyver)s' template survives the % formatting
                        msg = "'-Python-%%(pyver)s' should be included in versionsuffix in %s" % ec_fn
                        # This is only a failure for newly added ECs, not for existing ECS
                        # As that would probably break many ECs
                        if ec_fn in self.added_ecs_filenames:
                            failing_checks.append(msg)
                        else:
                            print('\nNote: Failed non-critical check: ' + msg)
                else:
                    # conversely, recent Python 3 deps should *not* carry a -Python-3.x.y versionsuffix
                    has_recent_python3_dep = any(LooseVersion(dep['version']) >= LooseVersion('3.8.6')
                                                 for dep in ec['dependencies'] if dep['name'] == 'Python')
                    if has_recent_python3_dep and re.search(r'-Python-3\.[0-9]+\.[0-9]+', ec['versionsuffix']):
                        msg = "'-Python-%%(pyver)s' should no longer be included in versionsuffix in %s" % ec_fn
                        failing_checks.append(msg)
                # require that running of "pip check" during sanity check is enabled via sanity_pip_check
                if easyblock in ['PythonBundle', 'PythonPackage']:
                    sanity_pip_check = ec.get('sanity_pip_check') or exts_default_options.get('sanity_pip_check')
                    if not sanity_pip_check and not any(re.match(regex, ec_fn) for regex in whitelist_pip_check):
                        failing_checks.append("sanity_pip_check should be enabled in %s" % ec_fn)
        if failing_checks:
            self.fail('\n'.join(failing_checks))
@skip_if_not_pr_to_non_main_branch()
def test_pr_R_packages(self):
"""Several checks for easyconfigs that install (bundles of) R packages."""
failing_checks = []
for ec in self.changed_ecs:
ec_fn = os.path.basename(ec.path)
exts_defaultclass = ec.get('exts_defaultclass')
if exts_defaultclass == 'RPackage' or ec.name == 'R':
seen_exts = set()
for ext in ec['exts_list']:
if isinstance(ext, (tuple, list)):
ext_name = ext[0]
else:
ext_name = ext
if ext_name in seen_exts:
failing_checks.append('%s was added multiple times to exts_list in %s' % (ext_name, ec_fn))
else:
seen_exts.add(ext_name)
self.assertFalse(failing_checks, '\n'.join(failing_checks))
@skip_if_not_pr_to_non_main_branch()
def test_pr_sanity_check_paths(self):
"""Make sure a custom sanity_check_paths value is specified for easyconfigs that use a generic easyblock."""
# some generic easyblocks already have a decent customised sanity_check_paths,
# including CMakePythonPackage, GoPackage, PythonBundle & PythonPackage;
# BuildEnv, ModuleRC and Toolchain easyblocks doesn't install anything so there is nothing to check.
whitelist = ['BuildEnv', 'CMakePythonPackage', 'CrayToolchain', 'GoPackage', 'ModuleRC',
'PythonBundle', 'PythonPackage', 'Toolchain']
# Bundles of dependencies without files of their own
# Autotools: Autoconf + Automake + libtool, (recent) GCC: GCCcore + binutils, CUDA: GCC + CUDAcore,
# CESM-deps: Python + Perl + netCDF + ESMF + git, FEniCS: DOLFIN and co
bundles_whitelist = ['Autotools', 'CESM-deps', 'CUDA', 'GCC', 'FEniCS', 'ESL-Bundle', 'ROCm']
failing_checks = []
for ec in self.changed_ecs:
easyblock = ec.get('easyblock')
if is_generic_easyblock(easyblock) and not ec.get('sanity_check_paths'):
sanity_check_ok = False
if easyblock in whitelist or (easyblock == 'Bundle' and ec['name'] in bundles_whitelist):
sanity_check_ok = True
# also allow bundles that enable per-component sanity checks
elif easyblock == 'Bundle':
if ec['sanity_check_components'] or ec['sanity_check_all_components']:
sanity_check_ok = True
if not sanity_check_ok:
ec_fn = os.path.basename(ec.path)
failing_checks.append("No custom sanity_check_paths found in %s" % ec_fn)
self.assertFalse(failing_checks, '\n'.join(failing_checks))
@skip_if_not_pr_to_non_main_branch()
def test_pr_https(self):
"""Make sure https:// URL is used (if it exists) for homepage/source_urls (rather than http://)."""
whitelist = [
'Kaiju', # invalid certificate at https://kaiju.binf.ku.dk
'libxml2', # https://xmlsoft.org works, but invalid certificate
'p4vasp', # https://www.p4vasp.at doesn't work
'ITSTool', # https://itstool.org/ doesn't work
'UCX-', # bad certificate for https://www.openucx.org
'MUMPS', # https://mumps.enseeiht.fr doesn't work
'PyFR', # https://www.pyfr.org doesn't work
'PycURL', # bad certificate for https://pycurl.io/
]
url_whitelist = [
# https:// doesn't work, results in index page being downloaded instead
# (see https://github.com/easybuilders/easybuild-easyconfigs/issues/9692)
'http://isl.gforge.inria.fr',
# https:// leads to File Not Found
'http://tau.uoregon.edu/',
# https:// has outdated SSL configurations
'http://faculty.scs.illinois.edu',
]
# Cache: Mapping of already checked HTTP urls to whether the HTTPS variant works
checked_urls = dict()
def check_https_url(http_url):
"""Check if the https url works"""
http_url = http_url.rstrip('/') # Remove trailing slashes
https_url_works = checked_urls.get(http_url)
if https_url_works is None:
https_url = http_url.replace('http://', 'https://')
try:
https_url_works = bool(urlopen(https_url, timeout=5))
except Exception:
https_url_works = False
checked_urls[http_url] = https_url_works
http_regex = re.compile('http://[^"\'\n]+', re.M)
failing_checks = []
for ec in self.changed_ecs:
ec_fn = os.path.basename(ec.path)
# skip whitelisted easyconfigs
if any(ec_fn.startswith(x) for x in whitelist):
continue
# ignore commented out lines in easyconfig files when checking for http:// URLs
ec_txt = '\n'.join(line for line in ec.rawtxt.split('\n') if not line.startswith('#'))
for http_url in http_regex.findall(ec_txt):
# skip whitelisted http:// URLs
if any(http_url.startswith(x) for x in url_whitelist):
continue
if check_https_url(http_url):
failing_checks.append("Found http:// URL in %s, should be https:// : %s" % (ec_fn, http_url))
if failing_checks:
self.fail('\n'.join(failing_checks))
def template_easyconfig_test(self, spec):
    """Tests for an individual easyconfig: parsing, instantiating easyblock, check patches, ...

    :param spec: path to the easyconfig file being tested
    """
    # set to False, so it's False in case of this test failing
    global single_tests_ok
    prev_single_tests_ok = single_tests_ok
    single_tests_ok = False
    # parse easyconfig
    ecs = process_easyconfig(spec)
    if len(ecs) == 1:
        ec = ecs[0]['ec']
        # cache the parsed easyconfig, to avoid that it is parsed again
        EasyConfigTest._parsed_easyconfigs.append(ecs[0])
    else:
        self.assertTrue(False, "easyconfig %s does not contain blocks, yields only one parsed easyconfig" % spec)
    # check easyconfig file name
    expected_fn = '%s-%s.eb' % (ec['name'], det_full_ec_version(ec))
    msg = "Filename '%s' of parsed easyconfig matches expected filename '%s'" % (spec, expected_fn)
    self.assertEqual(os.path.basename(spec), expected_fn, msg)
    name, easyblock = fetch_parameters_from_easyconfig(ec.rawtxt, ['name', 'easyblock'])
    # make sure easyconfig file is in expected location
    expected_subdir = os.path.join('easybuild', 'easyconfigs', letter_dir_for(name), name)
    subdir = os.path.join(*spec.split(os.path.sep)[-5:-1])
    fail_msg = "Easyconfig file %s not in expected subdirectory %s" % (spec, expected_subdir)
    self.assertEqual(expected_subdir, subdir, fail_msg)
    # sanity check for software name, moduleclass
    self.assertEqual(ec['name'], name)
    self.assertTrue(ec['moduleclass'] in build_option('valid_module_classes'))
    # instantiate easyblock with easyconfig file
    app_class = get_easyblock_class(easyblock, name=name)
    # check that automagic fallback to ConfigureMake isn't done (deprecated behaviour)
    fn = os.path.basename(spec)
    error_msg = "%s relies on automagic fallback to ConfigureMake, should use easyblock = 'ConfigureMake' instead" % fn
    self.assertTrue(easyblock or app_class is not ConfigureMake, error_msg)
    # dump the easyconfig file;
    # this should be done before creating the easyblock instance (done below via app_class),
    # because some easyblocks (like PythonBundle) modify easyconfig parameters at initialisation
    handle, test_ecfile = tempfile.mkstemp()
    os.close(handle)
    ec.dump(test_ecfile)
    dumped_ec = EasyConfigParser(test_ecfile).get_config_dict()
    os.remove(test_ecfile)
    app = app_class(ec)
    # more sanity checks
    # NOTE(review): assertTrue's 2nd argument is the failure *message*, so these two
    # only check that 'name'/'version' are truthy; assertEqual may have been intended
    # - confirm before tightening, since some easyblocks may tweak name/version
    self.assertTrue(name, app.name)
    self.assertTrue(ec['version'], app.version)
    # make sure that deprecated 'dummy' toolchain is no longer used, should use 'system' toolchain instead
    # but give recent EasyBuild easyconfigs special treatment to avoid breaking "eb --install-latest-eb-release"
    ec_fn = os.path.basename(spec)
    if not (ec_fn == 'EasyBuild-3.9.4.eb' or ec_fn.startswith('EasyBuild-4.')):
        error_msg_tmpl = "%s should use 'system' toolchain rather than deprecated 'dummy' toolchain"
        self.assertFalse(ec['toolchain']['name'] == 'dummy', error_msg_tmpl % os.path.basename(spec))
    # make sure that $root is not used, since it is not compatible with module files in Lua syntax
    res = re.findall(r'.*\$root.*', ec.rawtxt, re.M)
    error_msg = "Found use of '$root', not compatible with modules in Lua syntax, use '%%(installdir)s' instead: %s"
    self.assertFalse(res, error_msg % res)
    # check for redefined easyconfig parameters, there should be none...
    param_def_regex = re.compile(r'^(?P<key>\w+)\s*=', re.M)
    keys = param_def_regex.findall(ec.rawtxt)
    redefined_keys = []
    for key in sorted(nub(keys)):
        cnt = keys.count(key)
        if cnt > 1:
            redefined_keys.append((key, cnt))
    redefined_keys_error_msg = "There should be no redefined easyconfig parameters, found %d: " % len(redefined_keys)
    redefined_keys_error_msg += ', '.join('%s (%d)' % x for x in redefined_keys)
    self.assertFalse(redefined_keys, redefined_keys_error_msg)
    # make sure old GitHub urls for EasyBuild that include 'hpcugent' are no longer used
    old_urls = [
        'github.com/hpcugent/easybuild',
        'hpcugent.github.com/easybuild',
        'hpcugent.github.io/easybuild',
    ]
    for old_url in old_urls:
        self.assertFalse(old_url in ec.rawtxt, "Old URL '%s' not found in %s" % (old_url, spec))
    # make sure binutils is included as a (build) dep if toolchain is GCCcore
    if ec['toolchain']['name'] == 'GCCcore':
        # with 'Tarball' easyblock: only unpacking, no building; Eigen is also just a tarball
        requires_binutils = ec['easyblock'] not in ['Tarball'] and ec['name'] not in ['ANIcalculator', 'Eigen']
        # let's also exclude the very special case where the system GCC is used as GCCcore, and only apply this
        # exception to the dependencies of binutils (since we should eventually build a new binutils with GCCcore)
        if ec['toolchain']['version'] == 'system':
            binutils_complete_dependencies = ['M4', 'Bison', 'flex', 'help2man', 'zlib', 'binutils']
            requires_binutils &= bool(ec['name'] not in binutils_complete_dependencies)
        # if no sources/extensions/components are specified, it's just a bundle (nothing is being compiled)
        requires_binutils &= bool(ec['sources'] or ec['exts_list'] or ec.get('components'))
        if requires_binutils:
            # dependencies() returns both build and runtime dependencies
            # in some cases, binutils can also be a runtime dep (e.g. for Clang)
            # Also using GCC directly as a build dep is also allowed (it includes the correct binutils)
            dep_names = [d['name'] for d in ec.dependencies()]
            self.assertTrue('binutils' in dep_names or 'GCC' in dep_names,
                            "binutils or GCC is a build dep in %s: %s" % (spec, dep_names))
    # make sure that OpenSSL wrapper is used rather than OS dependency,
    # for easyconfigs using a 2021a (sub)toolchain or more recent common toolchain version
    osdeps = ec['osdependencies']
    if osdeps:
        # check whether any entry in osdependencies related to OpenSSL
        openssl_osdep = False
        for osdep in osdeps:
            # normalize: a single name becomes a one-element list of alternatives
            if isinstance(osdep, string_type):
                osdep = [osdep]
            if any('libssl' in x for x in osdep) or any('openssl' in x for x in osdep):
                openssl_osdep = True
        if openssl_osdep:
            tcname = ec['toolchain']['name']
            tcver = LooseVersion(ec['toolchain']['version'])
            gcc_subtc_2021a = tcname in ('GCCcore', 'GCC') and tcver > LooseVersion('10.3')
            if gcc_subtc_2021a or (tcname in ('foss', 'gompi', 'iimpi', 'intel') and tcver >= LooseVersion('2021')):
                self.assertFalse(openssl_osdep, "OpenSSL should not be listed as OS dependency in %s" % spec)
    # checksums are listed sources-first, so patch checksums start after src_cnt entries
    src_cnt = len(ec['sources'])
    patch_checksums = ec['checksums'][src_cnt:]
    patch_checksums_cnt = len(patch_checksums)
    # make sure all patch files are available
    specdir = os.path.dirname(spec)
    specfn = os.path.basename(spec)
    for idx, patch in enumerate(ec['patches']):
        # a patch entry may be a (filename, level/target) tuple; only the filename matters here
        if isinstance(patch, (tuple, list)):
            patch = patch[0]
        # only check actual patch files, not other files being copied via the patch functionality
        patch_full = os.path.join(specdir, patch)
        if patch.endswith('.patch'):
            msg = "Patch file %s is available for %s" % (patch_full, specfn)
            self.assertTrue(os.path.isfile(patch_full), msg)
        # verify checksum for each patch file
        if idx < patch_checksums_cnt and (os.path.exists(patch_full) or patch.endswith('.patch')):
            checksum = patch_checksums[idx]
            error_msg = "Invalid checksum for patch file %s in %s: %s" % (patch, ec_fn, checksum)
            res = verify_checksum(patch_full, checksum)
            self.assertTrue(res, error_msg)
    # make sure 'source' step is not being skipped,
    # since that implies not verifying the checksum
    error_msg = "'source' step should not be skipped in %s, since that implies not verifying checksums" % ec_fn
    self.assertFalse(ec['checksums'] and ('source' in ec['skipsteps']), error_msg)
    for ext in ec['exts_list']:
        # only (name, version, options-dict) extension specs carry patches/checksums to verify
        if isinstance(ext, (tuple, list)) and len(ext) == 3:
            ext_name = ext[0]
            self.assertTrue(isinstance(ext[2], dict), "3rd element of extension spec is a dictionary")
            # fall back to assuming a single source file for an extension
            src_cnt = len(ext[2].get('sources', [])) or 1
            checksums = ext[2].get('checksums', [])
            patch_checksums = checksums[src_cnt:]
            for idx, ext_patch in enumerate(ext[2].get('patches', [])):
                if isinstance(ext_patch, (tuple, list)):
                    ext_patch = ext_patch[0]
                # only check actual patch files, not other files being copied via the patch functionality
                ext_patch_full = os.path.join(specdir, ext_patch)
                if ext_patch.endswith('.patch'):
                    msg = "Patch file %s is available for %s" % (ext_patch_full, specfn)
                    self.assertTrue(os.path.isfile(ext_patch_full), msg)
                # verify checksum for each patch file
                # NOTE(review): this compares idx against the *outer* patch_checksums_cnt
                # (from ec['patches']) rather than len(patch_checksums) of this extension
                # - looks like a bug, confirm against upstream before changing
                if idx < patch_checksums_cnt and (os.path.exists(ext_patch_full) or ext_patch.endswith('.patch')):
                    checksum = patch_checksums[idx]
                    error_msg = "Invalid checksum for patch file %s for %s extension in %s: %s"
                    res = verify_checksum(ext_patch_full, checksum)
                    self.assertTrue(res, error_msg % (ext_patch, ext_name, ec_fn, checksum))
    # check whether all extra_options defined for used easyblock are defined
    extra_opts = app.extra_options()
    for key in extra_opts:
        self.assertTrue(key in app.cfg)
    app.close_log()
    os.remove(app.logfile)
    # inject dummy values for templates that are only known at a later stage
    dummy_template_values = {
        'builddir': '/dummy/builddir',
        'installdir': '/dummy/installdir',
        'parallel': '2',
    }
    ec.template_values.update(dummy_template_values)
    ec_dict = ec.parser.get_config_dict()
    orig_toolchain = ec_dict['toolchain']
    for key in ec_dict:
        # skip parameters for which value is equal to default value
        orig_val = ec_dict[key]
        if key in DEFAULT_CONFIG and orig_val == DEFAULT_CONFIG[key][0]:
            continue
        if key in extra_opts and orig_val == extra_opts[key][0]:
            continue
        # skip unknown parameters entirely (neither a default nor an easyblock extra option)
        if key not in DEFAULT_CONFIG and key not in extra_opts:
            continue
        # resolve templates on both sides so values are comparable
        orig_val = resolve_template(ec_dict[key], ec.template_values)
        dumped_val = resolve_template(dumped_ec[key], ec.template_values)
        # take into account that dumped value for *dependencies may include hard-coded subtoolchains
        # if no easyconfig was found for the dependency with the 'parent' toolchain,
        # if may get resolved using a subtoolchain, which is then hardcoded in the dumped easyconfig
        if key in DEPENDENCY_PARAMETERS:
            # number of dependencies should remain the same
            self.assertEqual(len(orig_val), len(dumped_val))
            for orig_dep, dumped_dep in zip(orig_val, dumped_val):
                # name should always match
                self.assertEqual(orig_dep[0], dumped_dep[0])
                # version should always match, or be a possibility from the version dict
                if isinstance(orig_dep[1], dict):
                    self.assertTrue(dumped_dep[1] in orig_dep[1].values())
                else:
                    self.assertEqual(orig_dep[1], dumped_dep[1])
                # 3rd value is versionsuffix;
                if len(dumped_dep) >= 3:
                    # if no versionsuffix was specified in original dep spec, then dumped value should be empty string
                    if len(orig_dep) >= 3:
                        self.assertEqual(dumped_dep[2], orig_dep[2])
                    else:
                        self.assertEqual(dumped_dep[2], '')
                # 4th value is toolchain spec
                if len(dumped_dep) >= 4:
                    if len(orig_dep) >= 4:
                        self.assertEqual(dumped_dep[3], orig_dep[3])
                    else:
                        # if a subtoolchain is specifed (only) in the dumped easyconfig,
                        # it should *not* be the same as the parent toolchain
                        self.assertNotEqual(dumped_dep[3], (orig_toolchain['name'], orig_toolchain['version']))
        # take into account that for some string-valued easyconfig parameters (configopts & co),
        # the easyblock may have injected additional values, which affects the dumped easyconfig file
        elif isinstance(orig_val, string_type):
            error_msg = "%s value '%s' should start with '%s'" % (key, dumped_val, orig_val)
            self.assertTrue(dumped_val.startswith(orig_val), error_msg)
        else:
            error_msg = "%s value should be equal in original and dumped easyconfig: '%s' vs '%s'"
            self.assertEqual(orig_val, dumped_val, error_msg % (key, orig_val, dumped_val))
    # test passed, so set back to True
    single_tests_ok = True and prev_single_tests_ok
def suite(loader=None):
    """Return all easyblock initialisation tests."""

    def _make_inner_test(spec_path):
        # bind spec_path via closure; each generated method runs the template test
        def innertest(self):
            template_easyconfig_test(self, spec_path)
        return innertest

    # dynamically generate a separate test for each of the available easyconfigs,
    # attaching them as methods to the EasyConfigTest class
    easyconfigs_path = get_paths_for('easyconfigs')[0]
    cnt = 0
    for subpath, dirs, specs in os.walk(easyconfigs_path, topdown=True):
        # ignore archived easyconfigs (prune in place so os.walk skips them)
        if '__archive__' in dirs:
            dirs.remove('__archive__')
        for spec in specs:
            if not spec.endswith('.eb') or spec == 'TEMPLATE.eb':
                continue
            cnt += 1
            innertest = _make_inner_test(os.path.join(subpath, spec))
            innertest.__doc__ = "Test for easyconfig %s" % spec
            # double underscore so parsing tests are run first
            innertest.__name__ = "test__parse_easyconfig_%s" % spec
            setattr(EasyConfigTest, innertest.__name__, innertest)

    print("Found %s easyconfigs..." % cnt)
    if not loader:
        loader = TestLoader()
    return loader.loadTestsFromTestCase(EasyConfigTest)
# allow running this test suite directly as a script
if __name__ == '__main__':
    main()
|
ULHPC/easybuild-easyconfigs
|
test/easyconfigs/easyconfigs.py
|
Python
|
gpl-2.0
| 71,491
|
[
"ABINIT",
"ASE",
"Amber",
"BLAST",
"Bioconductor",
"CP2K",
"Elk",
"GPAW",
"NetCDF",
"PySCF",
"WIEN2k",
"pysam"
] |
b5e0979ffa2cc93ee7e0c4933da74085f7c2b90287b2c2bd955f924d246577b2
|
# -*-coding:utf8-*-
from __future__ import print_function
"""
This file is part of SkyLab
Skylab is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# python packages
from copy import deepcopy
from itertools import product
import logging
# scipy-project imports
import numpy as np
import scipy.interpolate
from scipy.stats import norm
import healpy as hp
# local package imports
from . import set_pars
from .utils import kernel_func
# get module logger
def trace(self, message, *args, **kwargs):
    """Log *message* at the custom TRACE level (5, below DEBUG).

    Intended to be attached to ``logging.Logger`` as a method.
    """
    # bail out early when TRACE output is disabled for this logger
    if not self.isEnabledFor(5):
        return
    self._log(5, message, args, **kwargs)
# register the custom TRACE level (5, below DEBUG=10) and expose it as Logger.trace()
logging.addLevelName(5, "TRACE")
logging.Logger.trace = trace
# module-level logger; a StreamHandler is attached so messages are visible
# even without user-side logging configuration
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
# default values for parameters
# model-parameters
_gamma_params = dict(gamma=[2., (1., 4.)])
# histogramming
_sinDec_bins = 25
_sinDec_range = None
_2dim_bins = 25
# spline
_ratio_perc = 99.
_1dim_order = 2
_2dim_order = 2
_precision = 0.1
_par_val = np.nan
_parab_cache = np.zeros((0, ), dtype=[("S1", np.float), ("a", np.float),
("b", np.float)])
class NullModel(object):
    r"""Base class of models for likelihood fitting, this defines every core
    class of the likelihood fitting that is needed in the point source
    calculation without implementing any functionality. Use this class as
    starting point for a unbinned point source likelihood model
    """

    def __init__(self, *args, **kwargs):
        # NullModel is abstract and must never be instantiated directly.
        # (FIX: the original called .format(self.__repr__()) on a message
        # without any placeholder, silently discarding the argument.)
        raise NotImplementedError(
            "NullModel only to be used as abstract superclass")

    @property
    def params(self):
        # dict of fit parameters: name -> (seed, (lower bound, upper bound))
        return self._params

    @params.setter
    def params(self, val):
        self._params = val
        return

    @params.deleter
    def params(self):
        # deleting resets to an empty dict rather than removing the attribute
        self._params = dict()
        return

    def __raise__(self, *args, **kwargs):
        # common helper used by all abstract methods below
        raise NotImplementedError("Implemented as abstract in {0:s}...".format(
            self.__repr__()))

    def __call__(self, *args, **kwargs):
        r"""Calling the class-object will set it up for use of the other
        functions, i.e., creating splines out of data, etc.
        """
        self.__raise__()

    def background(self, *args, **kwargs):
        r"""Calculation of the background probability *B* in the point source
        likelihood, mainly a spatial dependent term.
        """
        self.__raise__()

    def extended_background(self, *args, **kwargs):
        r"""Calculation of the background probability *B* in the point source
        likelihood, mainly a spatial dependent term.
        """
        self.__raise__()

    def signal_sc(self, *args, **kwargs):
        r"""Calculation of the right ascension scrambled signal acceptance for
        background subtraction
        """
        self.__raise__()

    def signal(self, *args, **kwargs):
        r"""Calculation of the signal probability *S* in the point source
        likelihood, mainly a spatial dependent term.
        """
        self.__raise__()

    def extended_signal(self, *args, **kwargs):
        r"""Calculation of the signal probability *S* for an extended source
        , mainly a spatial dependent term.
        """
        self.__raise__()

    def reset(self, *args, **kwargs):
        r"""Resetting the llh-model to delete possible cached values
        """
        self.__raise__()

    def weight(self, *args, **kwargs):
        r"""Additional weights calculated for each event, commonly used to
        implement energy weights in the point source likelihood.
        It differs from signal and background distributions that is (to
        first approximation) does not depend on the source position.
        """
        self.__raise__()
class ClassicLLH(NullModel):
    r"""Classic likelihood model for point source searches, only using spatial
    information of each event
    """
    # class-level defaults; instances may override them via constructor kwargs
    sinDec_bins = _sinDec_bins
    sinDec_range = _sinDec_range
    _order = _1dim_order
    _bckg_spline = np.nan
    _gamma = 2.

    def __init__(self, *args, **kwargs):
        r"""Constructor of ClassicLLH. Set all configurations here.
        """
        self.params = kwargs.pop("params", dict())
        # Set all attributes passed to class
        set_pars(self, **kwargs)
        return

    def __call__(self, exp, mc, livetime, **kwargs):
        r"""Use experimental data to create one dimensional spline of
        declination information for background information.

        Parameters
        -----------
        exp : structured array
            Experimental data with all neccessary fields, i.e., sinDec for
            ClassicLLH
        mc : structured array
            Same as exp for Monte Carlo plus true information.
        livetime : float
            Livetime to scale the Monte Carlo with
        """
        hist, bins = np.histogram(exp["sinDec"], density=True,
                                  bins=self.sinDec_bins,
                                  range=self.sinDec_range)
        # background spline
        # overwrite range and bins to actual bin edges
        self.sinDec_bins = bins
        self.sinDec_range = (bins[0], bins[-1])
        if np.any(hist <= 0.):
            bmids = (self.sinDec_bins[1:] + self.sinDec_bins[:-1]) / 2.
            estr = ("Declination hist bins empty, this must not happen! "
                    +"Empty bins: {0}".format(bmids[hist <= 0.]))
            raise ValueError(estr)
        elif np.any((exp["sinDec"] < bins[0])|(exp["sinDec"] > bins[-1])):
            raise ValueError("Data outside of declination bins!")
        # spline of log(PDF); exponentiated again in background()
        self._bckg_spline = scipy.interpolate.InterpolatedUnivariateSpline(
            (bins[1:] + bins[:-1]) / 2.,
            np.log(hist), k=self.order)
        # eff. Area
        self._effA(mc, livetime, **kwargs)
        return

    def __str__(self):
        r"""String representation of ClassicLLH.
        """
        out_str = "{0:s}\n".format(self.__repr__())
        out_str += 67*"~"+"\n"
        out_str += "Spatial background hist:\n"
        out_str += "\tSinDec bins : {0:3d}\n".format(len(self.sinDec_bins)-1)
        out_str += "\tSinDec range : {0:-4.2f} to {1:-4.2f}\n".format(
            *self.sinDec_range)
        out_str += 67*"~"+"\n"
        return out_str

    def _effA(self, mc, livetime, **kwargs):
        r"""Build splines for effective Area given a fixed spectral
        index *gamma*.
        """
        # powerlaw weights (86400 s/day converts livetime in days to seconds)
        w = mc["ow"] * mc["trueE"]**(-self.gamma) * livetime * 86400.
        # get pdf of event distribution
        h, bins = np.histogram(np.sin(mc["trueDec"]), weights=w,
                               bins=self.sinDec_bins, density=True)
        # normalize by solid angle
        h /= np.diff(self.sinDec_bins)
        # multiply histogram by event sum for event densitiy
        h *= w.sum()
        self._spl_effA = scipy.interpolate.InterpolatedUnivariateSpline(
            (bins[1:] + bins[:-1]) / 2., np.log(h), k=self.order)
        # save total mc weight for correct signal proportions in multi year extended source
        self._total_mc_weight = w.sum()
        return

    @property
    def bckg_spline(self):
        # spline of log(background PDF) versus sin(declination)
        return self._bckg_spline

    @bckg_spline.setter
    def bckg_spline(self, val):
        if not hasattr(val, "__call__"):
            print(">>> WARNING: {0} is not callable! Not spline-ish".format(val))
            return
        self._bckg_spline = val
        return

    @property
    def gamma(self):
        # spectral index used for effective-area weighting
        return self._gamma

    @gamma.setter
    def gamma(self, val):
        self._gamma = float(val)
        return

    @property
    def order(self):
        # spline order used for background / effective-area splines
        return self._order

    @order.setter
    def order(self, val):
        # BUGFIX: the original assigned to 'self.order' here, which re-enters
        # this setter and recurses infinitely; store on the backing attribute
        # instead (cf. the gamma setter above).
        self._order = int(val)
        return

    def background(self, ev):
        r"""Spatial background distribution.

        For IceCube is only declination dependent, in a more general scenario,
        it is dependent on zenith and
        azimuth, e.g. in ANTARES, KM3NET, or using time dependent information.

        Parameters
        -----------
        ev : structured array
            Event array, importand information *sinDec* for this calculation

        Returns
        --------
        P : array-like
            spatial background probability for each event to be found
            at *sinDec*
        """
        # 1/(2 pi) accounts for uniformity in right ascension
        return 1. / 2. / np.pi * np.exp(self.bckg_spline(ev["sinDec"]))

    def extended_background(self, ev, background, coords = 'equatorial'):
        # look up the per-event pixel value in the given background healpix map
        pix = self.get_pix(ev, background, coords)
        return np.take(background, pix)

    def effA(self, dec, **params):
        r"""Calculate integrated effective Area at declination for distributing
        source events among different samples.
        """
        # outside the spline support there is no acceptance
        if (np.sin(dec) < self.sinDec_bins[0]
                or np.sin(dec) > self.sinDec_bins[-1]):
            return 0., None
        return self._spl_effA(np.sin(dec)), None

    def total_mc_weight(self, **params):
        r"""save total mc weight for correct signal proportions in multi year extended source
        """
        return self._total_mc_weight, None

    def reset(self):
        r"""Classic likelihood does only depend on spatial part, needs no
        caching
        """
        return

    def signal(self, src_ra, src_dec, ev):
        r"""Spatial distance between source position and events

        Signal is assumed to cluster around source position.
        The distribution is assumed to be well approximated by a gaussian
        locally.

        Parameters
        -----------
        ev : structured array
            Event array, import information: sinDec, ra, sigma

        Returns
        --------
        P : array-like
            Spatial signal probability for each event
        """
        cos_ev = np.sqrt(1. - ev["sinDec"]**2)
        # cosine of the great-circle distance via the spherical law of cosines
        cosDist = (np.cos(src_ra - ev["ra"])
                   * np.cos(src_dec) * cos_ev
                   + np.sin(src_dec) * ev["sinDec"])
        # handle possible floating precision errors
        cosDist[np.isclose(cosDist, 1.) & (cosDist > 1)] = 1.
        dist = np.arccos(cosDist)
        # 2D gaussian in the small-angle approximation, per-event width sigma
        return (1./2./np.pi/ev["sigma"]**2
                * np.exp(-dist**2 / 2. / ev["sigma"]**2))

    def get_pix(self, ev, m, coords = 'equatorial'):
        r"""Calculates healpix pixel for each event

        Parameters
        -----------
        ev : structured array
            Event array, import information: sinDec, ra, sigma
        m : map or array of maps in healpix array format
        coords : string
            Can be either equatorial or galactic

        Returns
        --------
        pix : array-like
            Array of event pixels in healpix map
        """
        # get number of pixels in map
        if m.ndim == 1: npix = len(m)
        else: npix = len(m[0])
        # calculate nside of map
        nside = hp.npix2nside(npix)
        # rotate to galactic coords when desired
        if coords == 'galactic':
            r = hp.Rotator(coord = ['C','G'])
            theta_gal, phi_gal = r(np.pi/2. - np.arcsin(ev["sinDec"]), ev["ra"])
            return hp.ang2pix(nside, theta_gal, phi_gal)
        # otherwise return pix in equatorial coords
        return hp.ang2pix(nside, np.pi/2. - np.arcsin(ev["sinDec"]), ev["ra"])

    def extended_signal(self, template_map, sigma_bins, ev, coords = 'equatorial'):
        r"""Calculates signal probabilities for each event
        from a template source map

        Parameters
        -----------
        ev : structured array
            Event array, import information: sinDec, ra, sigma
        template_map: source map in healpix array format
        sigma_bins : array-like
            Bin edges (degrees) that select which smoothed map an event uses

        Returns
        --------
        P : array-like
            Spatial signal probability for each event
        """
        pix = self.get_pix(ev, template_map, coords)
        if template_map.ndim == 1:
            #Only one map, sample straight from it
            return np.take(template_map, pix)
        else:
            # multiple maps, one per sigma bin: pick per-event map by angular error
            bin_nums = np.digitize(np.degrees(ev['sigma']), sigma_bins)-1
            if np.any(np.less(bin_nums,0)):
                #There are sigma values smaller than your bin range! Events will use smallest sigma bin.
                bin_nums[bin_nums < 0] = 0
            indices = np.arange(len(ev['sigma']))
            signal_vals = np.zeros(len(ev['sigma']))
            for i, sbin in enumerate(sigma_bins):
                mask = np.equal(bin_nums, i)
                vals = np.take(template_map[i], pix[mask])
                for j, index in enumerate(indices[mask]):
                    signal_vals[index] += vals[j]
            return signal_vals

    def signal_sc(self, template_map, ev, coords = 'equatorial'):
        # scrambled-signal acceptance: direct per-event lookup in the template map
        pix = self.get_pix(ev, template_map, coords)
        return np.take(template_map, pix)

    def weight(self, ev, **params):
        r"""For classicLLH, no weighting of events
        """
        return np.ones(len(ev)), None
class UniformLLH(ClassicLLH):
    r"""Spatial LLH class assuming a uniform event distribution over the
    whole sky.
    """
    def __call__(self, *args, **kwargs):
        # nothing to fit or cache for a uniform background
        return

    def background(self, ev):
        r"""Constant background density 1 / (4 pi) for every event."""
        return np.full(len(ev), 1. / 4. / np.pi)
class WeightLLH(ClassicLLH):
    r"""Likelihood class supporting weights for the calculation.

    The weights are calculated using N observables for exp. data and Monte
    Carlo.

    Abstract class, not incorporating a weighting scheme for Monte Carlo.
    """
    # module-level defaults: grid precision, sentinel grid point, empty cache
    _precision = _precision
    _g1 = _par_val
    _w_cache = _parab_cache

    def __init__(self, params, pars, bins, *args, **kwargs):
        r"""Constructor

        Parameters
        -----------
        params : dict
            List of fit parameters. Each entry is a tuple out of
            (seed, [lower bound, upper bound])
        pars : list
            Parameter names to use for histogram, without sinDec, which is
            added as last normalisation parameter
        bins : int, ndarray
            Binning for each parameter

        Other Parameters
        -----------------
        range : ndarray
            Bin ranges for each parameter
        normed : int
            Number of trailing histogram dimensions used purely for
            normalisation (no smoothing applied there)
        kernel : ndarray, int, float
            Smoothing filter defining the kernel for smoothing. Smoothing done
            solely for dimensions that are not normalised. A ndarray specifies
            the filter directly, an int is used for a flat kernel with size
            of *filter* in direction of both sides, a float uses a normal
            distributed kernel with approximately one standard deviation per
            bin.
        """
        self.hist_pars = pars
        self._ndim_bins = bins
        self._ndim_range = kwargs.pop("range", None)
        self._ndim_norm = kwargs.pop("normed", 0)

        def _expand_kernel(kernel_arr):
            # Outer product of the 1D kernel over all smoothed dimensions;
            # normalised dimensions get a trivial length-1 axis so they are
            # left untouched by the smoothing.
            XX = np.meshgrid(*([kernel_arr for i in range(len(self.hist_pars)
                                                          - self._ndim_norm)]
                               + [[1] for i in range(self._ndim_norm)]))
            # np.prod instead of the deprecated np.product alias
            return np.prod(XX, axis=0).T

        # define kernel
        kernel = kwargs.pop("kernel", 0)
        if np.all(np.asarray(kernel) == 0):
            # No smoothing
            self._XX = None
        elif isinstance(kernel, (list, np.ndarray)):
            kernel_arr = np.asarray(kernel)
            assert(np.all(kernel_arr >= 0))
            self._XX = _expand_kernel(kernel_arr)
        elif isinstance(kernel, int):
            # flat kernel of total width 2 * kernel + 1
            assert(kernel > 0)
            kernel_arr = np.ones(2 * kernel + 1, dtype=float)
            self._XX = _expand_kernel(kernel_arr)
        elif isinstance(kernel, float):
            assert(kernel >= 1)
            # ~one standard deviation per bin for the gaussian kernel
            val = 1.6635
            r = np.linspace(-val, val, 2 * int(kernel) + 1)
            self._XX = _expand_kernel(norm.pdf(r))
        else:
            raise ValueError("Kernel has to be positive int / float or array")

        super(WeightLLH, self).__init__(*args, params=params, **kwargs)

        self._w_spline_dict = dict()

        return

    def __call__(self, exp, mc, livetime):
        r"""In addition to *classicLLH.__call__(),
        splines for energy-declination are created as well.
        """
        self._setup(exp)

        # calculate splines for all grid values of the fit parameters
        par_grid = dict()
        # BUGFIX: dict.iteritems() is Python-2-only; items() works on both
        for par, val in self.params.items():
            # create grid of all values that could come up due to boundaries
            # use one more grid point below and above for gradient calculation
            low, high = val[1]
            grid = np.arange(low - self._precision,
                             high + 2. * self._precision,
                             self._precision)
            par_grid[par] = grid

        pars = list(par_grid.keys())
        for tup in product(*par_grid.values()):
            # call spline function to cache the spline
            self._ratio_spline(mc, **dict([(p_i, self._around(t_i))
                                           for p_i, t_i in zip(pars, tup)]))

        # create spatial splines of classic LLH class and eff. Area
        super(WeightLLH, self).__call__(exp, mc, livetime, **par_grid)

        return

    def __str__(self):
        r"""String representation
        """
        out_str = super(WeightLLH, self).__str__()
        out_str += "Weighting hist:\n"
        for p, b, r in zip(self.hist_pars, self._ndim_bins, self._ndim_range):
            out_str += "\t{0:11s} : {1:3d}\n".format(p + " bins", len(b)-1)
            out_str += "\t{0:11s} : {1:-4.2f} to {2:-4.2f}\n".format(
                p + " range", *r)
        out_str += "\tPrecision : {0:4.2f}\n".format(self._precision)
        out_str += 67*"~"+"\n"
        return out_str

    def _around(self, value):
        r"""Round a value to a precision defined in the class.

        Parameters
        -----------
        value : array-like
            Values to round to precision.

        Returns
        --------
        round : array-like
            Rounded values.
        """
        return np.around(float(value) / self._precision) * self._precision

    def _get_weights(self, **params):
        r"""Calculate weights using the given parameters.

        Parameters
        -----------
        params : dict
            Dictionary containing the parameter values for the weighting.

        Returns
        --------
        weights : array-like
            Weights for each event
        """
        raise NotImplementedError("Weigthing not specified, using subclass")

    def _hist(self, arr, weights=None):
        r"""Create histogram of data so that it is correctly normalized.
        The edges of the histogram are copied so that the spline is defined for
        the entire data range.
        """
        h, binedges = np.histogramdd(arr, bins=self._ndim_bins,
                                     range=self._ndim_range,
                                     weights=weights, normed=True)
        if self._ndim_norm > 0:
            # normalise each slice along the trailing *_ndim_norm* axes
            norms = np.sum(h, axis=tuple(range(h.ndim - self._ndim_norm)))
            norms[norms==0] = 1.
            h /= norms
        return h, binedges

    def _ratio_spline(self, mc, **params):
        r"""Create the ratio of signal over background probabilities. With same
        binning, the bin hypervolume cancels out, ensuring correct
        normalisation of the histograms.

        Parameters
        -----------
        mc : recarray
            Monte Carlo events to use for spline creation
        params : dict
            (Physics) parameters used for signal pdf calculation.

        Returns
        --------
        spline : scipy.interpolate.RegularGridInterpolator
            Spline for parameter values *params*
        """
        mcvars = [mc[p] if not p == "sinDec" else np.sin(mc["trueDec"])
                  for p in self.hist_pars]
        # create MC histogram
        wSh, wSb = self._hist(mcvars, weights=self._get_weights(mc, **params))
        wSh = kernel_func(wSh, self._XX)
        wSd = wSh > 0.

        # calculate ratio where both signal and background are populated
        ratio = np.ones_like(self._wB_hist, dtype=float)
        ratio[wSd & self._wB_domain] = (wSh[wSd & self._wB_domain]
                                        / self._wB_hist[wSd & self._wB_domain])
        # values outside of the exp domain, but inside the MC one are mapped to
        # the most signal-like value
        min_ratio = np.percentile(ratio[ratio>1.], _ratio_perc)
        np.copyto(ratio, min_ratio, where=wSd & ~self._wB_domain)

        binmids = [(wSb_i[1:] + wSb_i[:-1]) / 2. for wSb_i in wSb]
        # BUGFIX: the original used the comprehension variable wSb_i after
        # the list comprehension; that name does not leak on Python 3
        # (NameError).  Pin the last dimension's outermost midpoints to the
        # outer bin edges explicitly.
        binmids[-1][[0, -1]] = wSb[-1][0], wSb[-1][-1]
        binmids = tuple(binmids)

        # interpolate the log-ratio; outside the grid, fall back to
        # log-ratio 0, i.e. a ratio of one
        spline = scipy.interpolate.RegularGridInterpolator(
            binmids, np.log(ratio),
            method="linear",
            bounds_error=False,
            fill_value=0.)

        self._w_spline_dict[tuple(params.items())] = spline

        return spline

    def _setup(self, exp):
        r"""Set up everything for weight calculation.
        """
        # set up weights for background distribution, reset all cached values
        self._w_spline_dict = dict()

        expvars = [exp[p] for p in self.hist_pars]

        self._wB_hist, self._wB_bins = self._hist(expvars)
        self._wB_hist = kernel_func(self._wB_hist, self._XX)
        self._wB_domain = self._wB_hist > 0

        # overwrite bins with the actual edges so MC uses identical binning
        self._ndim_bins = self._wB_bins
        self._ndim_range = tuple([(wB_i[0], wB_i[-1])
                                  for wB_i in self._wB_bins])

        return

    def _spline_eval(self, spline, ev):
        r"""Evaluate spline on coordinates using the important parameters.
        """
        return spline(np.vstack([ev[p] for p in self.hist_pars]).T)

    @property
    def hist_pars(self):
        # names of the event fields entering the weighting histogram
        return self._hist_pars

    @hist_pars.setter
    def hist_pars(self, val):
        self._hist_pars = list(val)
        return

    def reset(self):
        r"""Energy weights are cached, reset all cached values.
        """
        self._w_cache = _parab_cache
        return

    def weight(self, ev, **params):
        r"""Evaluate spline for given parameters.

        Parameters
        -----------
        ev : structured array
            Events to be evaluated
        params : dict
            Parameters for evaluation (needs "gamma")

        Returns
        --------
        val : array-like (N), N events
            Function value.
        grad : array-like (N, M), N events in M parameter dimensions
            Gradients at function value.
        """
        # get params
        gamma = params["gamma"]

        # evaluate on finite gridpoints in spectral index gamma
        g1 = self._around(gamma)
        dg = self._precision

        # check whether the grid point of evaluation has changed
        if (np.isfinite(self._g1)
                and g1 == self._g1
                and len(ev) == len(self._w_cache)):
            # re-use the cached parabola coefficients
            S1 = self._w_cache["S1"]
            a = self._w_cache["a"]
            b = self._w_cache["b"]
        else:
            # evaluate neighbouring gridpoints and parametrize a parabola
            g0 = self._around(g1 - dg)
            g2 = self._around(g1 + dg)

            S0 = self._spline_eval(self._w_spline_dict[(("gamma", g0), )], ev)
            S1 = self._spline_eval(self._w_spline_dict[(("gamma", g1), )], ev)
            S2 = self._spline_eval(self._w_spline_dict[(("gamma", g2), )], ev)

            a = (S0 - 2. * S1 + S2) / (2. * dg**2)
            b = (S2 - S0) / (2. * dg)

            # cache values
            self._g1 = g1
            self._w_cache = np.zeros((len(ev),),
                                     dtype=[("S1", float), ("a", float),
                                            ("b", float)])
            self._w_cache["S1"] = S1
            self._w_cache["a"] = a
            self._w_cache["b"] = b

        # calculate value at the parabola
        val = np.exp(a * (gamma - g1)**2 + b * (gamma - g1) + S1)
        grad = val * (2. * a * (gamma - g1) + b)

        return val, np.atleast_2d(grad)
class PowerLawLLH(WeightLLH):
    r"""Weighted LLH class assuming unbroken power-law spectra for weighting.

    Optional Parameters
    --------------------
    seed : float
        Seed for gamma parameter
    bounds : ndarray (len 2)
        Bounds for minimisation
    """
    def __init__(self, *args, **kwargs):
        r"""Constructor; builds the single fit parameter *gamma* from the
        optional *seed* / *bounds* keywords, defaulting to _gamma_params.
        """
        # deepcopy so the mutable default bounds are never shared between
        # instances (the original's nested double deepcopy was redundant)
        params = dict(gamma=(kwargs.pop("seed", _gamma_params["gamma"][0]),
                             deepcopy(kwargs.pop("bounds",
                                                 _gamma_params["gamma"][1]))))
        super(PowerLawLLH, self).__init__(params, *args, **kwargs)
        return

    def _effA(self, mc, livetime, **pars):
        r"""Calculate two dimensional spline of effective Area versus
        declination and spectral index for Monte Carlo.

        Parameters
        -----------
        mc : structured array
            Monte Carlo events (needs ow, trueE, trueDec)
        livetime : float
            Livetime in days (converted to seconds via 86400)
        gamma : array-like (in *pars*)
            Grid of spectral indices; the 2nd element is always the given
            alpha
        """
        gamma_vals = pars["gamma"]
        # save total MC weight (at the reference alpha) for correct signal
        # proportions in multi-year extended-source use
        w = self._get_weights(mc, gamma=gamma_vals[1]) * livetime * 86400.
        self._total_mc_weight = w.sum()

        x = np.sin(mc["trueDec"])
        hist = np.vstack([np.histogram(x,
                                       weights=self._get_weights(mc, gamma=gm)
                                               * livetime * 86400.,
                                       bins=self.sinDec_bins)[0]
                          for gm in gamma_vals]).T

        # normalize bins by their binvolume, one dimension is the parameter
        # with width of *precision*
        bin_vol = np.diff(self.sinDec_bins)
        hist /= bin_vol[:, np.newaxis] * np.full_like(gamma_vals, self._precision)

        # BUGFIX: the spline knots must sit at the declination bin
        # *centres*; the original summed adjacent edges without dividing by
        # two, placing every knot at twice its intended sin(dec)
        sindec_mids = (self.sinDec_bins[1:] + self.sinDec_bins[:-1]) / 2.
        self._spl_effA = scipy.interpolate.RectBivariateSpline(
            sindec_mids, gamma_vals,
            np.log(hist), kx=2, ky=2, s=0)

        return

    @staticmethod
    def _get_weights(mc, **params):
        r"""Calculate power-law weights ow * trueE**(-gamma).

        Parameters
        -----------
        mc : structured array
            Monte Carlo with one-weights (ow) and true energies (trueE)
        params : dict
            Needs key "gamma", the spectral index.

        Returns
        --------
        weights : array-like
            Weights for each event
        """
        return mc["ow"] * mc["trueE"]**(-params["gamma"])

    def effA(self, dec, **params):
        r"""Evaluate effective Area at declination and spectral index.

        Parameters
        -----------
        dec : float
            Declination.
        gamma : float (in *params*)
            Spectral index.

        Returns
        --------
        effA : float
            Effective area at given point(s); 0 outside the declination range.
        grad_effA : dict or None
            Gradient w.r.t. gamma at given point(s), None outside range.
        """
        if (np.sin(dec) < self.sinDec_bins[0]
                or np.sin(dec) > self.sinDec_bins[-1]):
            return 0., None

        gamma = params["gamma"]

        val = np.exp(self._spl_effA(np.sin(dec), gamma, grid=False, dy=0.))
        grad = val * self._spl_effA(np.sin(dec), gamma, grid=False, dy=1.)

        return val, dict(gamma=grad)
class EnergyLLH(PowerLawLLH):
    r"""Likelihood using Energy Proxy and declination, where declination is
    used for normalisation to account for changing energy distributions.
    """
    def __init__(self, twodim_bins=_2dim_bins, twodim_range=None,
                 **kwargs):
        r"""Set up the power-law LLH on a (logE, sinDec) histogram with the
        sinDec axis as the normalisation dimension.
        """
        super(EnergyLLH, self).__init__(
            ["logE", "sinDec"], twodim_bins,
            range=twodim_range, normed=1, **kwargs)
        return
class EnergyDistLLH(PowerLawLLH):
    r"""Likelihood using Energy Proxy and starting distance for evaluation.
    Declination is not used for normalisation assuming that the energy does not
    change rapidly with declination.
    """
    def __init__(self, twodim_bins=_2dim_bins, twodim_range=None,
                 **kwargs):
        r"""Set up the power-law LLH on a (logE, dist) histogram; no
        normalisation dimension is used.
        """
        super(EnergyDistLLH, self).__init__(
            ["logE", "dist"], twodim_bins,
            range=twodim_range, **kwargs)
        return
class EnergyLLHfixed(EnergyLLH):
    r"""Energy Likelihood that uses external data to create the splines, and
    splines are not evaluated using the data given by call method.
    """
    def __init__(self, exp, mc, livetime, **kwargs):
        r"""Build the likelihood and immediately fit its splines to the
        fixed (exp, mc, livetime) sample passed in here.
        """
        parent = super(EnergyLLHfixed, self)
        # call constructor of super-class, settings are set.
        parent.__init__(**kwargs)
        # splines are created once, right now, from the fixed sample
        parent.__call__(exp, mc, livetime)
        return

    def __call__(self, exp, mc, livetime):
        r"""Deliberately a no-op: the splines built in __init__ stay fixed."""
        print("EnergyLLH with FIXED splines used here, call has no effect")
        return
|
zdgriffith/skylab
|
skylab/ps_model.py
|
Python
|
gpl-3.0
| 30,216
|
[
"Gaussian"
] |
9c3276ea2c78f27281927898942747d675f091eb610cc20c0697537b00c9b8dc
|
# Script to try and validate focus detection algorithms
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import ndimage
import scipy.signal
from PIL import Image
import zstackLib
import time
plt.ion()  # interactive mode so show() does not block the loop

# Test image (Lena); loaded, converted to a numpy array, then to grayscale
# for the sharpness metric.
filename = '/home/brian/git/pyZstack/lena.tif'
image = Image.open(filename)
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Gaussian kernel grows by this step each iteration; j*blurRate-1 keeps the
# kernel size odd, as required by cv2.GaussianBlur.
blurRate = 10
j=1
# Three panels: blurred image, intensity histogram, Laplacian histogram.
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
sharpness = []
# Blur the image progressively and record the Laplacian sharpness measure
# at each blur level.
while j <= 10:
    image2 = cv2.GaussianBlur(image, (j*blurRate-1,j*blurRate-1),0)
    sharp, conv = zstackLib.sharpnessLaplace(image2)
    sharpness.append(sharp)
    if 0:  # per-step debug visualisation, disabled by default
        ax1.imshow(image2,'gray')
        ax2.hist(image2.flatten(), 256, range=(0,256), fc='k', ec='k', normed=True)
        ax2.set_xlim([0,256])
        ax2.set_ylim([0,.018])
        ax3.hist(conv.flatten(), 256-40, range=(40,256), fc='k', ec='k', normed=True)
        ax3.set_xlim([40,256])
        ax3.set_ylim([0,.030])
        plt.show()
        plt.pause(0.001)
        plt.waitforbuttonpress()  # wait for user before next blur level
        ax1.cla()
        ax2.cla()
        ax3.cla()
    j += 1
# Plot sharpness versus blur level; it should decrease monotonically if the
# focus metric behaves as intended.
sharpness = np.asarray(sharpness)
plt.plot(sharpness)
plt.show()
plt.pause(0.001)
plt.waitforbuttonpress()
plt.close()
#plt.hist(image.flatten(), 256, range=(0,256), fc='k', ec='k')
|
gtg556h/pyZstack
|
sharpnessAlgorithm/callLaplacian.py
|
Python
|
mit
| 1,389
|
[
"Brian"
] |
a2ea8b2835366e9aa3fa84619c32a87a989cdff3fad29d08e180178fff858893
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL configuration: static pages, admin, auth, the classifier app, and
# (in development) user-uploaded media served by Django itself.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('gc_classifier.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^classifier/', include('classifier.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        # Mount django-debug-toolbar ahead of all other routes.
        import debug_toolbar
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
|
ToferC/gcclassifier
|
config/urls.py
|
Python
|
mit
| 1,607
|
[
"VisIt"
] |
70b5697cc6c6b5df2899915d95ff516980be662cfbb6adaef6aeec922f925f96
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2004-2005 Eero Tamminen
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# $Id$
"""Reports/Graphical Reports/Statistics Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import time
from gramps.gen.ggettext import sgettext as _
from functools import partial
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
# Person and relation types
from gramps.gen.lib import Person, FamilyRelType, EventType, EventRoleType
from gramps.gen.lib.date import Date
# gender and report type names
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, FONT_SERIF,
PARA_ALIGN_CENTER, PARA_ALIGN_LEFT,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (BooleanOption, NumberOption, EnumeratedListOption,
FilterOption, PersonOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.datehandler import displayer, parser
#------------------------------------------------------------------------
#
# Private Functions
#
#------------------------------------------------------------------------
def draw_wedge(doc, style, centerx, centery, radius, start_angle,
               end_angle, short_radius=0):
    """Draw a (possibly annular) wedge on *doc* and return the (x, y)
    midpoint of the wedge, which is a convenient label position."""
    from math import pi, cos, sin

    # normalise so the sweep from start to end is non-negative (degrees)
    while end_angle < start_angle:
        end_angle += 360

    deg2rad = pi / 180.0
    step = deg2rad / 2          # sample the arcs every half degree
    a0 = start_angle * deg2rad
    a1 = end_angle * deg2rad
    while a1 < a0:
        a1 = a1 + 2 * pi

    points = []
    ang = a0
    if short_radius == 0:
        # solid wedge: anchor at the centre, unless it is a full circle
        if (end_angle - start_angle) != 360:
            points.append((centerx, centery))
    else:
        # annular wedge: start on the inner radius
        points.append((centerx + cos(ang) * short_radius,
                       centery + sin(ang) * short_radius))

    # outer arc, swept forwards
    while ang < a1:
        points.append((centerx + cos(ang) * radius,
                       centery + sin(ang) * radius))
        ang = ang + step
    points.append((centerx + cos(a1) * radius,
                   centery + sin(a1) * radius))

    if short_radius:
        # inner arc, swept backwards, to close the annulus
        points.append((centerx + cos(a1) * short_radius,
                       centery + sin(a1) * short_radius))
        ang = a1
        while ang >= a0:
            points.append((centerx + cos(ang) * short_radius,
                           centery + sin(ang) * short_radius))
            ang -= step

    doc.draw_path(style, points)

    # polar midpoint of the wedge
    half_sweep = (a1 - a0) / 2.0
    mid_r = short_radius + (radius - short_radius) / 2.0
    return ((centerx + cos(a0 + half_sweep) * mid_r),
            (centery + sin(a0 + half_sweep) * mid_r))
def draw_pie_chart(doc, center_x, center_y, radius, data, start=0):
    """
    Draws a pie chart in the specified document. The data passed is plotted as
    a pie chart. The data should consist of the actual data. Percentages of
    each slice are determined by the routine.

    @param doc: Document to which the pie chart should be added
    @type doc: BaseDoc derived class
    @param center_x: x coordinate in centimeters where the center of the pie
       chart should be. 0 is the left hand edge of the document.
    @type center_x: float
    @param center_y: y coordinate in centimeters where the center of the pie
       chart should be. 0 is the top edge of the document
    @type center_y: float
    @param radius: radius of the pie chart. The pie charts width and height
       will be twice this value.
    @type radius: float
    @param data: List of tuples containing the data to be plotted. The values
       are (graphics_format, value), where graphics_format is a BaseDoc
       GraphicsStyle, and value is a floating point number. Any other items in
       the tuple are ignored. This allows you to share the same data list with
       the L{draw_legend} function.
    @type data: list
    @param start: starting point in degrees, where the default of 0 indicates
       a start point extending from the center to right in a horizontal line.
    @type start: float
    """
    # idiom: sum() instead of a manual accumulation loop
    total = sum(item[1] for item in data)

    # each slice's sweep is its share of the full 360 degrees
    for item in data:
        incr = 360.0 * (item[1] / total)
        draw_wedge(doc, item[0], center_x, center_y, radius, start,
                   start + incr)
        start += incr
def draw_legend(doc, start_x, start_y, data, title, label_style):
    """
    Draws a legend for a graph in the specified document. The data passed is
    used to define the legend.  First item style is used for the optional
    Legend title.

    @param doc: Document to which the legend chart should be added
    @type doc: BaseDoc derived class
    @param start_x: x coordinate in centimeters where the left hand corner
        of the legend is placed. 0 is the left hand edge of the document.
    @type start_x: float
    @param start_y: y coordinate in centimeters where the top of the legend
        should be. 0 is the top edge of the document
    @type start_y: float
    @param data: List of tuples containing the data to be used to create the
       legend. In order to be compatible with the graph plots, the first and
       third values of the tuple used. The format is (graphics_format, value,
       legend_description).
    @type data: list
    """
    style_sheet = doc.get_style_sheet()

    def _line_height(draw_style_name):
        # height (cm) of one legend line: the font size of the paragraph
        # style attached to the given draw style
        gstyle = style_sheet.get_draw_style(draw_style_name)
        pstyle = style_sheet.get_paragraph_style(gstyle.get_paragraph_style())
        return ReportUtils.pt2cm(pstyle.get_font().get_size())

    if title:
        size = _line_height(label_style)
        doc.draw_text(label_style, title,
                      start_x + (3 * size), start_y - (size * 0.25))
        start_y += size * 1.3

    for (box_style, _value, legend) in data:
        size = _line_height(box_style)
        doc.draw_box(box_style, "", start_x, start_y, (2 * size), size)
        doc.draw_text(label_style, legend,
                      start_x + (3 * size), start_y - (size * 0.25))
        start_y += size * 1.3
# Today's date, parsed once at import time; used as the default upper bound
# for age estimation when a person has no death event.
_t = time.localtime(time.time())
_TODAY = parser.parse("%04d-%02d-%02d" % _t[:3])
def estimate_age(db, person, end_handle=None, start_handle=None, today=_TODAY):
    """
    Estimates the age of a person based off the birth and death
    dates of the person. A tuple containing the estimated upper
    and lower bounds of the person's age is returned. If either
    the birth or death date is missing, a (-1, -1) is returned.

    @param db: GRAMPS database to which the Person object belongs
    @type db: DbBase
    @param person: Person object to calculate the age of
    @type person: Person
    @param end_handle: Determines the event handle that determines
       the upper limit of the age. If None, the death event is used
    @type end_handle: str
    @param start_handle: Determines the event handle that determines
       the lower limit of the event. If None, the birth event is
       used
    @type start_handle: str
    @param today: date substituted for a missing end event; if None and no
       end event exists, (-1, -1) is returned
    @returns: tuple containing the lower and upper bounds of the
       person's age, or (-1, -1) if it could not be determined.
    @rtype: tuple
    """
    # resolve the event delimiting the lower bound (default: birth)
    bhandle = None
    if start_handle:
        bhandle = start_handle
    else:
        bref = person.get_birth_ref()
        if bref:
            bhandle = bref.get_reference_handle()

    # resolve the event delimiting the upper bound (default: death)
    dhandle = None
    if end_handle:
        dhandle = end_handle
    else:
        dref = person.get_death_ref()
        if dref:
            dhandle = dref.get_reference_handle()

    # if either of the events is not defined, return an error message
    if not bhandle:
        return (-1, -1)

    bdata = db.get_event_from_handle(bhandle).get_date_object()
    if dhandle:
        ddata = db.get_event_from_handle(dhandle).get_date_object()
    else:
        # no end event: fall back to *today* if provided
        if today is not None:
            ddata = today
        else:
            return (-1, -1)

    # if the date is not valid, return an error message
    if not bdata.get_valid() or not ddata.get_valid():
        return (-1, -1)

    # if a year is not valid, return an error message
    if not bdata.get_year_valid() or not ddata.get_year_valid():
        return (-1, -1)

    bstart = bdata.get_start_date()
    bstop = bdata.get_stop_date()

    dstart = ddata.get_start_date()
    dstop = ddata.get_stop_date()

    def _calc_diff(low, high):
        # year difference between two date tuples; subtract one when the
        # (month, day) of *high* has not yet reached that of *low*
        if (low[1], low[0]) > (high[1], high[0]):
            return high[2] - low[2] - 1
        else:
            return high[2] - low[2]

    # combine start/stop dates of both events into a lower/upper age bound;
    # Date.EMPTY as stop date means the date is exact, not a range
    if bstop == dstop == Date.EMPTY:
        lower = _calc_diff(bstart, dstart)
        age = (lower, lower)
    elif bstop == Date.EMPTY:
        lower = _calc_diff(bstart, dstart)
        upper = _calc_diff(bstart, dstop)
        age = (lower, upper)
    elif dstop == Date.EMPTY:
        lower = _calc_diff(bstop, dstart)
        upper = _calc_diff(bstart, dstart)
        age = (lower, upper)
    else:
        lower = _calc_diff(bstop, dstart)
        upper = _calc_diff(bstart, dstop)
        age = (lower, upper)
    return age
#------------------------------------------------------------------------
#
# Global options and their names
#
#------------------------------------------------------------------------
class _options:
    # sort type identifiers
    SORT_VALUE = 0  # sort report rows by their item counts
    SORT_KEY = 1    # sort report rows alphabetically by item name

    # (identifier, non-localized name, localized name) triples offered in
    # the report's option menus
    sorts = [
        (SORT_VALUE, "Item count", _("Item count")),
        (SORT_KEY, "Item name", _("Item name"))
    ]
    genders = [
        (Person.UNKNOWN, "Both", _("Both")),
        (Person.MALE, "Men", _("Men")),
        (Person.FEMALE, "Women", _("Women"))
    ]
#------------------------------------------------------------------------
#
# Data extraction methods from the database
#
#------------------------------------------------------------------------
class Extract(object):
    def __init__(self):
        """Methods for extracting statistical data from the database"""
        # key, non-localized name, localized name, type method, data method
        # The "type method" maps a person to the object(s) to inspect
        # (person, event, or (person, handle list)); the "data method" maps
        # that object to a list of result strings for the chart.
        self.extractors = {
            'data_title':  ("Title", _("person|Title"),
                            self.get_person, self.get_title),
            'data_sname':  ("Surname", _("Surname"),
                            self.get_person, self.get_surname),
            'data_fname':  ("Forename", _("Forename"),
                            self.get_person, self.get_forename),
            'data_gender': ("Gender", _("Gender"),
                            self.get_person, self.get_gender),
            'data_byear':  ("Birth year", _("Birth year"),
                            self.get_birth, self.get_year),
            'data_dyear':  ("Death year", _("Death year"),
                            self.get_death, self.get_year),
            'data_bmonth': ("Birth month", _("Birth month"),
                            self.get_birth, self.get_month),
            'data_dmonth': ("Death month", _("Death month"),
                            self.get_death, self.get_month),
            'data_bplace': ("Birth place", _("Birth place"),
                            self.get_birth, self.get_place),
            'data_dplace': ("Death place", _("Death place"),
                            self.get_death, self.get_place),
            'data_mplace': ("Marriage place", _("Marriage place"),
                            self.get_marriage_handles, self.get_places),
            'data_mcount': ("Number of relationships", _("Number of relationships"),
                            self.get_family_handles, self.get_handle_count),
            'data_fchild': ("Age when first child born", _("Age when first child born"),
                            self.get_child_handles, self.get_first_child_age),
            'data_lchild': ("Age when last child born", _("Age when last child born"),
                            self.get_child_handles, self.get_last_child_age),
            'data_ccount': ("Number of children", _("Number of children"),
                            self.get_child_handles, self.get_handle_count),
            'data_mage':   ("Age at marriage", _("Age at marriage"),
                            self.get_marriage_handles, self.get_event_ages),
            'data_dage':   ("Age at death", _("Age at death"),
                            self.get_person, self.get_death_age),
            'data_age':    ("Age", _("Age"),
                            self.get_person, self.get_person_age),
            'data_etypes': ("Event type", _("Event type"),
                            self.get_event_handles, self.get_event_type)
        }
# ----------------- data extraction methods --------------------
# take an object and return a list of strings
def get_title(self, person):
"return title for given person"
# TODO: return all titles, not just primary ones...
title = person.get_primary_name().get_title()
if title:
return [title]
else:
return [_("(Preferred) title missing")]
def get_forename(self, person):
"return forenames for given person"
# TODO: return all forenames, not just primary ones...
firstnames = person.get_primary_name().get_first_name().strip()
if firstnames:
return firstnames.split()
else:
return [_("(Preferred) forename missing")]
def get_surname(self, person):
"return surnames for given person"
# TODO: return all surnames, not just primary ones...
surnames = person.get_primary_name().get_surname().strip()
if surnames:
return surnames.split()
else:
return [_("(Preferred) surname missing")]
def get_gender(self, person):
"return gender for given person"
# TODO: why there's no Person.getGenderName?
# It could be used by getDisplayInfo & this...
if person.gender == Person.MALE:
return [_("Men")]
if person.gender == Person.FEMALE:
return [_("Women")]
return [_("Gender unknown")]
def get_year(self, event):
"return year for given event"
date = event.get_date_object()
if date:
year = date.get_year()
if year:
return [str(year)]
return [_("Date(s) missing")]
def get_month(self, event):
"return month for given event"
date = event.get_date_object()
if date:
month = date.get_month()
if month:
return [displayer.long_months[month]]
return [_("Date(s) missing")]
def get_place(self, event):
"return place for given event"
place_handle = event.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(place_handle).get_title()
if place:
return [place]
return [_("Place missing")]
def get_places(self, data):
"return places for given (person,event_handles)"
places = []
person, event_handles = data
for event_handle in event_handles:
event = self.db.get_event_from_handle(event_handle)
place_handle = event.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(place_handle).get_title()
if place:
places.append(place)
else:
places.append(_("Place missing"))
return places
def get_person_age(self, person):
"return age for given person, if alive"
death_ref = person.get_death_ref()
if not death_ref:
return [self.estimate_age(person)]
return [_("Already dead")]
def get_death_age(self, person):
"return age at death for given person, if dead"
death_ref = person.get_death_ref()
if death_ref:
return [self.estimate_age(person, death_ref.ref)]
return [_("Still alive")]
def get_event_ages(self, data):
"return ages at given (person,event_handles)"
person, event_handles = data
ages = [self.estimate_age(person, h) for h in event_handles]
if ages:
return ages
return [_("Events missing")]
def get_event_type(self, data):
"return event types at given (person,event_handles)"
types = []
person, event_handles = data
for event_handle in event_handles:
event = self.db.get_event_from_handle(event_handle)
evtType = str(event.get_type())
types.append(evtType)
if types:
return types
return [_("Events missing")]
def get_first_child_age(self, data):
"return age when first child in given (person,child_handles) was born"
ages, errors = self.get_sorted_child_ages(data)
if ages:
errors.append(ages[0])
return errors
return [_("Children missing")]
def get_last_child_age(self, data):
"return age when last child in given (person,child_handles) was born"
ages, errors = self.get_sorted_child_ages(data)
if ages:
errors.append(ages[-1])
return errors
return [_("Children missing")]
def get_handle_count(self, data):
"return number of handles in given (person, handle_list) used for child count, family count"
return ["%3d" % len(data[1])]
# ------------------- utility methods -------------------------
def get_sorted_child_ages(self, data):
"return (sorted_ages,errors) for given (person,child_handles)"
ages = []
errors = []
person, child_handles = data
for child_handle in child_handles:
child = self.db.get_person_from_handle(child_handle)
birth_ref = child.get_birth_ref()
if birth_ref:
ages.append(self.estimate_age(person, birth_ref.ref))
else:
errors.append(_("Birth missing"))
continue
ages.sort()
return (ages, errors)
def estimate_age(self, person, end=None, begin=None):
"""return estimated age (range) for given person or error message.
age string is padded with spaces so that it can be sorted"""
age = estimate_age(self.db, person, end, begin)
if age[0] < 0 or age[1] < 0:
# inadequate information
return _("Date(s) missing")
if age[0] == age[1]:
# exact year
return "%3d" % age[0]
else:
# minimum and maximum
return "%3d-%d" % (age[0], age[1])
# ------------------- type methods -------------------------
# take db and person and return suitable gramps object(s)
def get_person(self, person):
"return person"
return person
def get_birth(self, person):
"return birth event for given person or None"
birth_ref = person.get_birth_ref()
if birth_ref:
return self.db.get_event_from_handle(birth_ref.ref)
return None
def get_death(self, person):
"return death event for given person or None"
death_ref = person.get_death_ref()
if death_ref:
return self.db.get_event_from_handle(death_ref.ref)
return None
def get_child_handles(self, person):
"return list of child handles for given person or None"
children = []
for fam_handle in person.get_family_handle_list():
fam = self.db.get_family_from_handle(fam_handle)
for child_ref in fam.get_child_ref_list():
children.append(child_ref.ref)
# TODO: it would be good to return only biological children,
# but GRAMPS doesn't offer any efficient way to check that
# (I don't want to check each children's parent family mother
# and father relations as that would make this *much* slower)
if children:
return (person, children)
return None
def get_marriage_handles(self, person):
"return list of marriage event handles for given person or None"
marriages = []
for family_handle in person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if int(family.get_relationship()) == FamilyRelType.MARRIED:
for event_ref in family.get_event_ref_list():
event = self.db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE and \
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY ):
marriages.append(event_ref.ref)
if marriages:
return (person, marriages)
return None
def get_family_handles(self, person):
"return list of family handles for given person or None"
families = person.get_family_handle_list()
if families:
return (person, families)
return None
def get_event_handles(self, person):
"return list of event handles for given person or None"
events = [ref.ref for ref in person.get_event_ref_list()]
if events:
return (person, events)
return None
# ----------------- data collection methods --------------------
def get_person_data(self, person, collect):
"""Add data from the database to 'collect' for the given person,
using methods rom the 'collect' data dict tuple
"""
for chart in collect:
# get the information
type_func = chart[2]
data_func = chart[3]
obj = type_func(person) # e.g. get_date()
if obj:
value = data_func(obj) # e.g. get_year()
else:
value = [_("Personal information missing")]
# list of information found
for key in value:
if key in chart[1]:
chart[1][key] += 1
else:
chart[1][key] = 1
    def collect_data(self, db, filter_func, menu, genders,
                     year_from, year_to, no_years, cb_progress):
        """goes through the database and collects the selected personal
        data persons fitting the filter and birth year criteria. The
        arguments are:
        db          - the GRAMPS database
        filter_func - filtering function selected by the StatisticsDialog
        menu        - report menu whose boolean options select which
                      extraction methods are used
        genders     - which gender(s) to include into statistics
        year_from   - use only persons who've born this year or after
        year_to     - use only persons who've born this year or before
        no_years    - use also people without known birth year
        cb_progress - callback to indicate progress
        Returns an array of tuple of:
        - Extraction method title
        - Dict of values with their counts
        (- Method)
        """
        self.db = db # store for use by methods
        data = []
        ext = self.extractors
        # which methods to use
        for name in self.extractors:
            option = menu.get_option_by_name(name)
            if option.get_value() == True:
                # localized data title, value dict, type and data method
                data.append((ext[name][1], {}, ext[name][2], ext[name][3]))
        # go through the people and collect data
        for person_handle in filter_func.apply(db, db.iter_person_handles(), cb_progress):
            cb_progress()
            person = db.get_person_from_handle(person_handle)
            # check whether person has suitable gender
            # NOTE(review): despite the plural name, 'genders' is a single
            # gender constant here (the caller passes its 'gender' value)
            if person.gender != genders and genders != Person.UNKNOWN:
                continue
            # check whether birth year is within required range
            birth = self.get_birth(person)
            if birth:
                birthdate = birth.get_date_object()
                if birthdate.get_year_valid():
                    year = birthdate.get_year()
                    if not (year >= year_from and year <= year_to):
                        continue
                else:
                    # birth year unknown:
                    # if death before range, person's out of range too...
                    death = self.get_death(person)
                    if death:
                        deathdate = death.get_date_object()
                        if deathdate.get_year_valid() and deathdate.get_year() < year_from:
                            continue
                        if not no_years:
                            # do not accept people who are not known to be in range
                            continue
                    else:
                        continue
            else:
                # NOTE(review): persons without any birth event are always
                # skipped, even when no_years is set -- confirm intended
                continue
            self.get_person_data(person, data)
        return data
# GLOBAL: required so that we get access to _Extract.extractors[]
# Unfortunately class variables cannot reference instance methods :-/
# (the report and its options class both iterate this shared instance's
# extractor table)
_Extract = Extract()
#------------------------------------------------------------------------
#
# Statistics report
#
#------------------------------------------------------------------------
class StatisticsChart(Report):
    """Draw-based report producing pie and bar charts of collected statistics."""

    def __init__(self, database, options, user):
        """
        Create the Statistics object that produces the report.
        Uses the Extractor class to extract the data from the database.
        The arguments are:
        database        - the GRAMPS database instance
        options         - instance of the Options class for this report
        user            - a gen.user.User() instance
        To see what the options are, check the options help in the options class.
        """
        Report.__init__(self, database, options, user)
        menu = options.menu
        self._user = user
        get_option_by_name = menu.get_option_by_name
        get_value = lambda name: get_option_by_name(name).get_value()
        self.filter_option = get_option_by_name('filter')
        self.filter = self.filter_option.get_filter()
        self.bar_items = get_value('bar_items')
        year_from = get_value('year_from')
        year_to = get_value('year_to')
        gender = get_value('gender')
        # title needs both data extraction method name + gender name
        if gender == Person.MALE:
            genders = _("Men")
        elif gender == Person.FEMALE:
            genders = _("Women")
        else:
            genders = None
        # needed for keyword based localization
        mapping = {
            'genders': genders,
            'year_from': year_from,
            'year_to': year_to
        }
        # extract requested items from the database and count them
        self._user.begin_progress(_('Statistics Charts'),
                                  _('Collecting data...'), 0)
        tables = _Extract.collect_data(database, self.filter, menu,
                                       gender, year_from, year_to,
                                       get_value('no_years'),
                                       self._user.step_progress)
        self._user.end_progress()
        self._user.begin_progress(_('Statistics Charts'),
                                  _('Sorting data...'), len(tables))
        self.data = []
        sortby = get_value('sortby')
        reverse = get_value('reverse')
        for table in tables:
            # generate sorted item lookup index
            lookup = self.index_items(table[1], sortby, reverse)
            # document heading
            mapping['chart_title'] = table[0]
            if genders:
                heading = _("%(genders)s born %(year_from)04d-%(year_to)04d: %(chart_title)s") % mapping
            else:
                heading = _("Persons born %(year_from)04d-%(year_to)04d: %(chart_title)s") % mapping
            self.data.append((heading, table[0], table[1], lookup))
            self._user.step_progress()
        self._user.end_progress()

    def lookup_compare(self, a, b):
        """Compare the lookup values of keys a and b, returning -1/0/1.

        Implemented without the Python 2-only cmp() builtin; kept for
        backward compatibility although index_items() now sorts with a
        key function.
        """
        va = self.lookup_items[a]
        vb = self.lookup_items[b]
        return (va > vb) - (va < vb)

    def index_items(self, data, sort, reverse):
        """Create and return a sorted key index for the data dict."""
        # sort by item keys
        index = sorted(data, reverse=bool(reverse))
        if sort == _options.SORT_VALUE:
            # stored for lookup_compare() compatibility
            self.lookup_items = data
            # then sort by value; the key function replaces the Python
            # 2-only comparison-function argument of list.sort()
            index.sort(key=lambda k: self.lookup_items[k],
                       reverse=bool(reverse))
        return index

    def write_report(self):
        """Output the selected statistics, one chart per page."""
        mark = IndexMark(_('Statistics Charts'), INDEX_TYPE_TOC, 1)
        self._user.begin_progress(_('Statistics Charts'),
                                  _('Saving charts...'), len(self.data))
        for data in self.data:
            self.doc.start_page()
            if mark:
                self.doc.draw_text('SC-title', '', 0, 0, mark) # put it in TOC
                mark = None # crock, but we only want one of them
            # small value sets become pie charts, larger ones bar charts
            if len(data[2]) < self.bar_items:
                self.output_piechart(*data[:4])
            else:
                self.output_barchart(*data[:4])
            self.doc.end_page()
            self._user.step_progress()
        self._user.end_progress()

    def output_piechart(self, title, typename, data, lookup):
        """Draw one pie chart page with its legend."""
        # set layout variables
        middle_w = self.doc.get_usable_width() / 2
        middle_h = self.doc.get_usable_height() / 2
        middle = min(middle_w, middle_h)
        # start output
        mark = IndexMark(title, INDEX_TYPE_TOC, 2)
        self.doc.center_text('SC-title', title, middle_w, 0, mark)
        style_sheet = self.doc.get_style_sheet()
        pstyle = style_sheet.get_paragraph_style('SC-Title')
        yoffset = ReportUtils.pt2cm(pstyle.get_font().get_size())
        # collect data for output
        color = 0
        chart_data = []
        for key in lookup:
            style = "SC-color-%d" % color
            text = "%s (%d)" % (key, data[key])
            # graphics style, value, and its label
            chart_data.append((style, data[key], text))
            color = (color+1) % 7 # There are only 7 color styles defined
        margin = 1.0
        legendx = 2.0
        # output data...
        radius = middle - 2*margin
        yoffset += margin + radius
        draw_pie_chart(self.doc, middle_w, yoffset, radius, chart_data, -90)
        yoffset += radius + 2*margin
        if middle == middle_h: # Landscape
            legendx = 1.0
            yoffset = margin
        text = _("%s (persons):") % typename
        draw_legend(self.doc, legendx, yoffset, chart_data, text, 'SC-legend')

    def output_barchart(self, title, typename, data, lookup):
        """Draw one bar chart page, splitting onto new pages as needed."""
        pt2cm = ReportUtils.pt2cm
        style_sheet = self.doc.get_style_sheet()
        pstyle = style_sheet.get_paragraph_style('SC-Text')
        font = pstyle.get_font()
        # set layout variables
        width = self.doc.get_usable_width()
        row_h = pt2cm(font.get_size())
        max_y = self.doc.get_usable_height() - row_h
        pad = row_h * 0.5
        # check maximum value
        max_value = max(data[k] for k in lookup) if lookup else 0
        # horizontal area for the gfx bars
        margin = 1.0
        middle = width/2.0
        textx = middle + margin/2.0
        stopx = middle - margin/2.0
        maxsize = stopx - margin
        # start output
        mark = IndexMark(title, INDEX_TYPE_TOC, 2)
        self.doc.center_text('SC-title', title, middle, 0, mark)
        pstyle = style_sheet.get_paragraph_style('SC-Title')
        yoffset = pt2cm(pstyle.get_font().get_size())
        # header
        yoffset += (row_h + pad)
        text = _("%s (persons):") % typename
        self.doc.draw_text('SC-text', text, textx, yoffset)
        for key in lookup:
            yoffset += (row_h + pad)
            if yoffset > max_y:
                # for graphical report, page_break() doesn't seem to work
                self.doc.end_page()
                self.doc.start_page()
                yoffset = 0
            # right align bar to the text
            value = data[key]
            startx = stopx - (maxsize * value / max_value)
            self.doc.draw_box('SC-bar', "", startx, yoffset, stopx-startx, row_h)
            # text after bar
            text = "%s (%d)" % (key, data[key])
            self.doc.draw_text('SC-text', text, textx, yoffset)
#------------------------------------------------------------------------
#
# StatisticsChartOptions
#
#------------------------------------------------------------------------
class StatisticsChartOptions(MenuReportOptions):
    """Menu options for the Statistics Chart report."""

    def __init__(self, name, dbase):
        self.__pid = None
        self.__filter = None
        self.__db = dbase
        MenuReportOptions.__init__(self, name, dbase)

    def add_menu_options(self, menu):
        """
        Add options to the menu for the statistics report.
        """
        ################################
        add_option = partial(menu.add_option, _("Report Options"))
        ################################
        self.__filter = FilterOption(_("Filter"), 0)
        self.__filter.set_help(
            _("Determines what people are included in the report."))
        add_option("filter", self.__filter)
        self.__filter.connect('value-changed', self.__filter_changed)
        self.__pid = PersonOption(_("Filter Person"))
        self.__pid.set_help(_("The center person for the filter."))
        add_option("pid", self.__pid)
        self.__pid.connect('value-changed', self.__update_filters)
        self.__update_filters()
        sortby = EnumeratedListOption(_('Sort chart items by'),
                                      _options.SORT_VALUE)
        for item_idx, item in enumerate(_options.sorts):
            sortby.add_item(item_idx, item[2])
        sortby.set_help(_("Select how the statistical data is sorted."))
        add_option("sortby", sortby)
        reverse = BooleanOption(_("Sort in reverse order"), False)
        reverse.set_help(_("Check to reverse the sorting order."))
        add_option("reverse", reverse)
        this_year = time.localtime()[0]
        year_from = NumberOption(_("People Born After"),
                                 1700, 1, this_year)
        year_from.set_help(_("Birth year from which to include people."))
        add_option("year_from", year_from)
        year_to = NumberOption(_("People Born Before"),
                               this_year, 1, this_year)
        year_to.set_help(_("Birth year until which to include people"))
        add_option("year_to", year_to)
        no_years = BooleanOption(_("Include people without known birth years"),
                                 False)
        no_years.set_help(_("Whether to include people without "
                            "known birth years."))
        add_option("no_years", no_years)
        gender = EnumeratedListOption(_('Genders included'),
                                      Person.UNKNOWN)
        for item in _options.genders:
            gender.add_item(item[0], item[2])
        gender.set_help(_("Select which genders are included into "
                          "statistics."))
        add_option("gender", gender)
        bar_items = NumberOption(_("Max. items for a pie"), 8, 0, 20)
        bar_items.set_help(_("With fewer items pie chart and legend will be "
                             "used instead of a bar chart."))
        add_option("bar_items", bar_items)
        # -------------------------------------------------
        # List of available charts on two separate option tabs.
        # Floor division keeps the Python 2 split point under Python 3,
        # where a plain '/' would yield a float half-point.
        half = len(_Extract.extractors) // 2
        self.charts = {}
        for idx, key in enumerate(_Extract.extractors):
            if idx < half:
                category_name = _("Charts 1")
            else:
                category_name = _("Charts 2")
            opt = BooleanOption(_Extract.extractors[key][1], False)
            opt.set_help(_("Include charts with indicated data."))
            menu.add_option(category_name, key, opt)
        # Enable a couple of charts by default
        menu.get_option_by_name("data_gender").set_value(True)
        menu.get_option_by_name("data_ccount").set_value(True)
        menu.get_option_by_name("data_bmonth").set_value(True)

    def __update_filters(self):
        """
        Update the filter list based on the selected person
        """
        gid = self.__pid.get_value()
        person = self.__db.get_person_from_gramps_id(gid)
        filter_list = ReportUtils.get_person_filters(person, False)
        self.__filter.set_filters(filter_list)

    def __filter_changed(self):
        """
        Handle filter change. If the filter is not specific to a person,
        disable the person option
        """
        filter_value = self.__filter.get_value()
        if filter_value in [1, 2, 3, 4]:
            # Filters 1, 2, 3 and 4 rely on the center person
            self.__pid.set_available(True)
        else:
            # The rest don't
            self.__pid.set_available(False)

    def make_default_style(self, default_style):
        """Make the default output style for the Statistics report."""
        # Paragraph Styles
        f = FontStyle()
        f.set_size(10)
        f.set_type_face(FONT_SERIF)
        p = ParagraphStyle()
        p.set_font(f)
        p.set_alignment(PARA_ALIGN_LEFT)
        p.set_description(_("The style used for the items and values."))
        default_style.add_paragraph_style("SC-Text", p)
        f = FontStyle()
        f.set_size(14)
        f.set_type_face(FONT_SANS_SERIF)
        p = ParagraphStyle()
        p.set_font(f)
        p.set_alignment(PARA_ALIGN_CENTER)
        p.set_description(_("The style used for the title of the page."))
        default_style.add_paragraph_style("SC-Title", p)
        # Graphic Styles:
        #   SC-title   - Contains the SC-Title paragraph style used for
        #                the title of the document
        #   SC-text    - Contains the SC-Name paragraph style used for
        #                the individual's name
        #   SC-color-N - The colors for drawing pies.
        #   SC-bar     - A red bar with 0.5pt black line.
        g = GraphicsStyle()
        g.set_paragraph_style("SC-Title")
        g.set_color((0, 0, 0))
        g.set_fill_color((255, 255, 255))
        g.set_line_width(0)
        default_style.add_draw_style("SC-title", g)
        g = GraphicsStyle()
        g.set_paragraph_style("SC-Text")
        g.set_color((0, 0, 0))
        g.set_fill_color((255, 255, 255))
        g.set_line_width(0)
        default_style.add_draw_style("SC-text", g)
        width = 0.8
        # The seven pie slice fill colors, cycled by output_piechart().
        for idx, rgb in enumerate([(255, 0, 0),       # red
                                   (255, 158, 33),    # orange
                                   (0, 178, 0),       # green
                                   (123, 0, 123),     # violet
                                   (255, 255, 0),     # yellow
                                   (0, 105, 214),     # blue
                                   (210, 204, 210)]): # gray
            g = GraphicsStyle()
            g.set_paragraph_style('SC-Text')
            g.set_color((0, 0, 0))
            g.set_fill_color(rgb)
            g.set_line_width(width)
            default_style.add_draw_style("SC-color-%d" % idx, g)
        g = GraphicsStyle()
        g.set_color((0, 0, 0))
        g.set_fill_color((255, 0, 0))
        g.set_line_width(width)
        default_style.add_draw_style("SC-bar", g)
        # legend
        g = GraphicsStyle()
        g.set_paragraph_style('SC-Text')
        g.set_color((0, 0, 0))
        g.set_fill_color((255, 255, 255))
        g.set_line_width(0)
        default_style.add_draw_style("SC-legend", g)
|
arunkgupta/gramps
|
gramps/plugins/drawreport/statisticschart.py
|
Python
|
gpl-2.0
| 42,816
|
[
"Brian"
] |
90431c192f2a25cf2f4a87ebc38a56f47a9a89add70829526d010c890e88896b
|
import Avogadro
import unittest
from numpy import *
class TestAtom(unittest.TestCase):
    """Unit tests for the Avogadro Atom python bindings."""

    def setUp(self):
        # every test starts from a fresh molecule
        self.molecule = Avogadro.molecules.addMolecule()

    def test_type(self):
        atom = self.molecule.addAtom()
        self.assertEqual(atom.type, Avogadro.PrimitiveType.AtomType)

    def test_pos(self):
        atom = self.molecule.addAtom()
        atom.pos # test if it is there
        vec = array([1., 2., 3.])
        atom.pos = vec # test setter
        # test getter
        self.assertEqual(atom.pos[0], 1.)
        self.assertEqual(atom.pos[1], 2.)
        self.assertEqual(atom.pos[2], 3.)

    def test_atomicNumber(self):
        atom = self.molecule.addAtom()
        self.assertEqual(atom.atomicNumber, 0)
        atom.atomicNumber = 6
        self.assertEqual(atom.atomicNumber, 6)

    def test_forceVector(self):
        atom = self.molecule.addAtom()
        atom.forceVector # test if it is there
        vec = array([1., 2., 3.])
        atom.forceVector = vec # test setter
        # test getter
        self.assertEqual(atom.forceVector[0], 1.)
        self.assertEqual(atom.forceVector[1], 2.)
        self.assertEqual(atom.forceVector[2], 3.)

    def test_residue(self):
        atom = self.molecule.addAtom()
        self.assertEqual(atom.residue, None)
        residue = self.molecule.addResidue() # test setter
        residue.addAtom(atom.id)
        self.assertNotEqual(atom.residue, None) # test getter
        self.assertEqual(atom.residueId, 0)

    def test_bonds(self):
        atom = self.molecule.addAtom()
        # add 5 bonds
        for i in range(5):
            bond = self.molecule.addBond()
            bond.setBegin(atom)
        # test the length
        self.assertEqual(len(atom.bonds), 5)
        # test the items
        for i in range(5):
            self.assertEqual(atom.bonds[i], i)

    def test_neighbors(self):
        # add 4 atoms
        atom1 = self.molecule.addAtom()
        atom2 = self.molecule.addAtom()
        atom3 = self.molecule.addAtom()
        atom4 = self.molecule.addAtom()
        # add 3 bonds
        bond1 = self.molecule.addBond()
        bond2 = self.molecule.addBond()
        bond3 = self.molecule.addBond()
        # bond.setAtoms() calls atom.addBond()
        bond1.setAtoms(atom1.id, atom2.id, 1)
        bond2.setAtoms(atom1.id, atom3.id, 1)
        bond3.setAtoms(atom1.id, atom4.id, 1)
        # test the length
        self.assertEqual(len(atom1.neighbors), 3)

    def test_valence(self):
        # add 3 atoms
        atom1 = self.molecule.addAtom()
        atom2 = self.molecule.addAtom()
        atom3 = self.molecule.addAtom()
        # add 2 bonds
        bond1 = self.molecule.addBond()
        bond2 = self.molecule.addBond()
        # bond.setAtoms() calls atom.addBond()
        bond1.setAtoms(atom1.id, atom2.id, 1)
        bond2.setAtoms(atom1.id, atom3.id, 1)
        self.assertEqual(atom1.valence, 2)
        # test bond(otherAtom)
        self.assertNotEqual(atom1.bond(atom3), None)
        self.assertNotEqual(atom1.bond(atom2), None)
        self.assertEqual(atom2.bond(atom3), None)

    def test_isHydrogen(self):
        # add a single hydrogen atom
        atom = self.molecule.addAtom()
        atom.atomicNumber = 1
        # assertTrue replaces the deprecated assert_ alias (removed in
        # Python 3.12)
        self.assertTrue(atom.isHydrogen)

    # ask Marcus...
    def test_partialCharge(self):
        atom = self.molecule.addAtom()
        atom.atomicNumber = 35
        self.assertEqual(atom.partialCharge, 0.0)
        atom.partialCharge = 0.325
        self.assertEqual(atom.partialCharge, 0.325)
if __name__ == "__main__":
    # run the Atom binding tests when this file is executed directly
    unittest.main()
|
rcplane/periodicdisplay
|
reference/avogadro/libavogadro/src/python/unittest/atom.py
|
Python
|
gpl-2.0
| 3,207
|
[
"Avogadro"
] |
5f87be128b4d555e7ed63184f23cd4757d215d42515236f3b714f4e91921452f
|
# Configuration file for jointcal
from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask
# Select external catalogs for Astrometry
config.astrometryRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.astrometryRefObjLoader.ref_dataset_name='pan-starrs'
# Map instrument filters to Pan-STARRS reference bands.
# NOTE(review): 'u' falls back to 'g' (Pan-STARRS has no u band) and the
# second i filter 'i2' shares the 'i' band -- confirm intended.
config.astrometryRefObjLoader.filterMap = {
    'u':'g',
    'g':'g',
    'r':'r',
    'i':'i',
    'i2': 'i',
    'z':'z',
    'y':'y',
}
# Type of model to fit to astrometry
# Allowed values:
# simplePoly One polynomial per ccd
# constrainedPoly One polynomial per ccd, and one polynomial per visit
# None Field is optional
#
config.astrometryModel='simplePoly' # for the record (default value)
# Select external catalogs for Photometry
config.doPhotometry = False # True # comment out to run the photometric calibration
config.photometryRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.photometryRefObjLoader.ref_dataset_name='sdss'
# Map instrument filters to SDSS reference bands.
# NOTE(review): 'y' maps to SDSS 'Z' since SDSS has no y band -- confirm.
config.photometryRefObjLoader.filterMap = {
    'u': 'U',
    'g': 'G',
    'r': 'R',
    'i': 'I',
    'i2': 'I',
    'z': 'Z',
    'y': 'Z',
}
# These are the default values
# Minimum allowed signal-to-noise ratio for sources used for matching
# (in the flux specified by sourceFluxType); <= 0 for no limit
# config.sourceSelector['matcher'].minSnr = 40.0
# Minimum allowed signal-to-noise ratio for sources used for matching
# (in the flux specified by sourceFluxType); <= 0 for no limit
config.sourceSelector['astrometry'].minSnr = 40.0 # default is 10
|
LSSTDESC/ReprocessingTaskForce
|
config/w_2017_52/jointcalConfig.py
|
Python
|
gpl-2.0
| 1,496
|
[
"VisIt"
] |
9d7231bbad628e6f13c34a0114ca73ee21f2959b5b9dc770cfb8c9839944a4a6
|
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The mesh module provides mesh generators: methods that return a topology and an
accompanying geometry function. Meshes can either be generated on the fly, e.g.
:func:`rectilinear`, or read from external an externally prepared file,
:func:`gmsh`, and converted to nutils format. Note that no mesh writers are
provided at this point.
"""
from . import topology, function, util, element, numpy, numeric, transform, transformseq, warnings, types, cache, _
from .elementseq import References
import os, itertools, re, math, treelog as log, io, contextlib
# MESH GENERATORS
@log.withcontext
def rectilinear(richshape, periodic=(), name='rect'):
  '''rectilinear mesh

  Build a structured topology and geometry from ``richshape``, a sequence
  with one entry per dimension: either an int (number of unit-size
  elements) or a monotonic sequence of vertex coordinates.  Returns a
  ``(topo, geom)`` pair.
  '''
  ndims = len(richshape)
  shape = []
  offset = []
  scale = []
  uniform = True
  for v in richshape:
    if numeric.isint(v):
      # int entry: unit elements starting at the origin
      assert v > 0
      shape.append(v)
      scale.append(1)
      offset.append(0)
    elif numpy.equal(v, numpy.linspace(v[0],v[-1],len(v))).all():
      # equidistant vertices: representable by a linear map of rootcoords
      shape.append(len(v)-1)
      scale.append((v[-1]-v[0]) / float(len(v)-1))
      offset.append(v[0])
    else:
      # non-equidistant vertices: fall back to a spline-basis geometry
      shape.append(len(v)-1)
      uniform = False
  root = transform.Identifier(ndims, name)
  axes = [transformseq.DimAxis(0,n,idim in periodic) for idim, n in enumerate(shape)]
  topo = topology.StructuredTopology(root, axes)
  if uniform:
    # collapse per-dimension offset/scale to scalars when they all agree
    if all(o == offset[0] for o in offset[1:]):
      offset = offset[0]
    if all(s == scale[0] for s in scale[1:]):
      scale = scale[0]
    geom = function.rootcoords(ndims) * scale + offset
  else:
    funcsp = topo.basis('spline', degree=1, periodic=())
    coords = numeric.meshgrid(*richshape).reshape(ndims, -1)
    geom = (funcsp * coords).sum(-1)
  return topo, geom
def line(nodes, periodic=False, bnames=None):
  '''one-dimensional structured topology

  ``nodes`` is either an int (number of unit-size elements) or a sequence
  of vertex coordinates.  Returns a ``(domain, geom)`` pair.
  '''
  if isinstance(nodes, int):
    assert nodes > 0
    nelems = nodes
    scale = 1
    offset = 0
    uniform = True
  else:
    nelems = len(nodes)-1
    scale = (nodes[-1]-nodes[0]) / nelems
    offset = nodes[0]
    # equidistant vertices admit a linear geometry map
    uniform = numpy.equal(nodes, offset + numpy.arange(nelems+1) * scale).all()
  root = transform.Identifier(1, 'line')
  domain = topology.StructuredLine(root, 0, nelems, periodic=periodic, bnames=bnames)
  if uniform:
    geom = function.rootcoords(1) * scale + offset
  else:
    geom = domain.basis('std', degree=1, periodic=False).dot(nodes)
  return domain, geom
def newrectilinear(nodes, periodic=None, bnames=[['left','right'],['bottom','top'],['front','back']]):
  '''rectilinear mesh assembled as a product of 1D :func:`line` topologies

  NOTE(review): the mutable default for ``bnames`` is only read, never
  mutated, so it is safe here.
  '''
  if periodic is None:
    periodic = numpy.zeros(len(nodes), dtype=bool)
  else:
    periodic = numpy.asarray(periodic)
  assert len(periodic) == len(nodes) and periodic.ndim == 1 and periodic.dtype == bool
  # pad bnames with None entries so zip covers dimensions beyond the
  # provided names
  dims = [line(nodesi, periodici, bnamesi) for nodesi, periodici, bnamesi in zip(nodes, periodic, tuple(bnames)+(None,)*len(nodes))]
  domain, geom = dims.pop(0)
  for domaini, geomi in dims:
    domain = domain * domaini
    geom = function.concatenate(function.bifurcate(geom,geomi))
  return domain, geom
@log.withcontext
def multipatch(patches, nelems, patchverts=None, name='multipatch'):
  '''multipatch rectilinear mesh generator

  Generator for a :class:`~nutils.topology.MultipatchTopology` and geometry.
  The :class:`~nutils.topology.MultipatchTopology` consists of a set patches,
  where each patch is a :class:`~nutils.topology.StructuredTopology` and all
  patches have the same number of dimensions.

  The ``patches`` argument, a :class:`numpy.ndarray`-like with shape
  ``(npatches, 2*ndims)`` or ``(npatches,)+(2,)*ndims``, defines the
  connectivity by labelling the patch vertices.  For example, three
  one-dimensional patches can be connected at one edge by::

      # connectivity:     3
      #                   │
      #                1──0──2
      patches=[[0,1], [0,2], [0,3]]

  Or two two-dimensional patches along an edge by::

      # connectivity:  3──4──5
      #                │  │  │
      #                0──1──2
      patches=[[[0,3],[1,4]], [[1,4],[2,5]]]

  The geometry is specified by the ``patchverts`` argument: a
  :class:`numpy.ndarray`-like with shape ``(nverts,ngeomdims)`` specifying for
  each vertex a coordinate.  Note that the dimension of the geometry may be
  higher than the dimension of the patches.  The created geometry is a
  patch-wise linear interpolation of the vertex coordinates.  If the
  ``patchverts`` argument is omitted the geometry describes a unit hypercube
  per patch.

  The ``nelems`` argument is either an :class:`int` defining the number of
  elements per patch per dimension, or a :class:`dict` with edges (a pair of
  vertex numbers) as keys and the number of elements (:class:`int`) as values,
  with key ``None`` specifying the default number of elements.  Example::

      # connectivity:  3─────4─────5
      #                │ 4x3 │ 8x3 │
      #                0─────1─────2
      patches=[[[0,3],[1,4]], [[1,4],[2,5]]]
      nelems={None: 4, (1,2): 8, (4,5): 8, (0,3): 3, (1,4): 3, (2,5): 3}

  Since the patches are structured topologies, the number of elements per
  patch per dimension should be unambiguous.  In above example specifying
  ``nelems={None: 4, (1,2): 8}`` will raise an exception because the patch on
  the right has 8 elements along edge ``(1,2)`` and 4 along ``(4,5)``.

  Example
  -------

  An L-shaped domain can be generated by::

      >>> # connectivity:  2──5
      >>> #                │  │
      >>> #                1──4─────7     y
      >>> #                │  │     │     │
      >>> #                0──3─────6     └──x
      >>> domain, geom = multipatch(
      ...   patches=[[0,1,3,4], [1,2,4,5], [3,4,6,7]],
      ...   patchverts=[[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [3,0], [3,1]],
      ...   nelems={None: 4, (3,6): 8, (4,7): 8})

  The number of elements is chosen such that all elements in the domain have
  the same size.

  A topology and geometry describing the surface of a sphere can be generated
  by creating a multipatch cube surface and inflating the cube to a sphere:

      >>> # connectivity:    3────7
      >>> #                 ╱│   ╱│
      >>> #                2────6 │     y
      >>> #                │ │  │ │     │
      >>> #                │ 1──│─5     │ z
      >>> #                │╱   │╱      │╱
      >>> #                0────4       *────x
      >>> import itertools
      >>> from nutils import function
      >>> topo, cube = multipatch(
      ...   patches=[
      ...     [0,1,2,3], # left, normal: x
      ...     [4,5,6,7], # right, normal: x
      ...     [0,1,4,5], # bottom, normal: -y
      ...     [2,3,6,7], # top, normal: -y
      ...     [0,2,4,6], # front, normal: z
      ...     [1,3,5,7], # back, normal: z
      ...   ],
      ...   patchverts=tuple(itertools.product(*([[-1,1]]*3))),
      ...   nelems=1)
      >>> sphere = function.normalized(cube)

  The normals of the patches are determined by the order of the vertex numbers.
  An outward normal for the cube is obtained by flipping the left, top and
  front faces:

      >>> cubenormal = cube.normal(exterior=True) * topo.basis('patch').dot([-1,1,1,-1,-1,1])

  At the centroids of the faces the outward normal should equal the cube geometry:

      >>> numpy.testing.assert_allclose(*topo.sample('gauss', 1).eval([cubenormal, cube]))

  Similarly, the outward normal of the sphere is obtained by:

      >>> spherenormal = sphere.normal(exterior=True) * topo.basis('patch').dot([-1,1,1,-1,-1,1])
      >>> numpy.testing.assert_allclose(*topo.sample('gauss', 1).eval([spherenormal, cube]))

  Args
  ----
  patches:
      A :class:`numpy.ndarray` with shape sequence of patches with each patch being a list of vertex indices.
  patchverts:
      A sequence of coordinates of the vertices.
  nelems:
      Either an :class:`int` specifying the number of elements per patch per
      dimension, or a :class:`dict` with edges (a pair of vertex numbers) as
      keys and the number of elements (:class:`int`) as values, with key
      ``None`` specifying the default number of elements.

  Returns
  -------
  :class:`nutils.topology.MultipatchTopology`:
      The multipatch topology.
  :class:`nutils.function.Array`:
      The geometry defined by the ``patchverts`` or a unit hypercube per patch
      if ``patchverts`` is not specified.
  '''
  patches = numpy.array(patches)
  if patches.dtype != int:
    raise ValueError('`patches` should be an array of ints.')
  if patches.ndim < 2 or patches.ndim == 2 and patches.shape[-1] % 2 != 0:
    raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
  elif patches.ndim > 2 and patches.shape[1:] != (2,) * (patches.ndim - 1):
    raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
  patches = patches.reshape(patches.shape[0], -1)
  # determine topological dimension of patches
  ndims = 0
  while 2**ndims < patches.shape[1]:
    ndims += 1
  if 2**ndims > patches.shape[1]:
    raise ValueError('Only hyperrectangular patches are supported: ' \
      'number of patch vertices should be a power of two.')
  patches = patches.reshape([patches.shape[0]] + [2]*ndims)
  # group all common patch edges (and/or boundaries?)
  if isinstance(nelems, int):
    nelems = {None: nelems}
  elif isinstance(nelems, dict):
    # normalize edge keys to (orderless) frozensets; None stays the default
    nelems = {(k and frozenset(k)): v for k, v in nelems.items()}
  else:
    raise ValueError('`nelems` should be an `int` or `dict`')
  # create patch topologies, geometries
  if patchverts is not None:
    patchverts = numpy.array(patchverts)
    indices = set(patches.flat)
    if tuple(sorted(indices)) != tuple(range(len(indices))):
      raise ValueError('Patch vertices in `patches` should be numbered consecutively, starting at 0.')
    if len(patchverts) != len(indices):
      raise ValueError('Number of `patchverts` does not equal number of vertices specified in `patches`.')
    if len(patchverts.shape) != 2:
      raise ValueError('Every patch vertex should be an array of dimension 1.')
  topos = []
  coords = []
  for i, patch in enumerate(patches):
    # find shape of patch and local patch coordinates
    shape = []
    for dim in range(ndims):
      # collect the element count specified for every edge along `dim`
      nelems_sides = []
      sides = [(0,1)]*ndims
      sides[dim] = slice(None),
      for side in itertools.product(*sides):
        sideverts = frozenset(patch[side])
        if sideverts in nelems:
          nelems_sides.append(nelems[sideverts])
        else:
          nelems_sides.append(nelems[None])
      if len(set(nelems_sides)) != 1:
        raise ValueError('duplicate number of elements specified for patch {} in dimension {}'.format(i, dim))
      shape.append(nelems_sides[0])
    # create patch topology
    topos.append(rectilinear(shape, name='{}{}'.format(name, i))[0])
    # compute patch geometry
    patchcoords = [numpy.linspace(0, 1, n+1) for n in shape]
    patchcoords = numeric.meshgrid(*patchcoords).reshape(ndims, -1)
    if patchverts is not None:
      # multilinear interpolation of the patch corner coordinates
      patchcoords = numpy.array([
        sum(
          patchverts[j]*util.product(c if s else 1-c for c, s in zip(coord, side))
          for j, side in zip(patch.flat, itertools.product(*[[0,1]]*ndims))
        )
        for coord in patchcoords.T
      ]).T
    coords.append(patchcoords)
  # build patch boundary data
  boundarydata = topology.MultipatchTopology.build_boundarydata(patches)
  # join patch topologies, geometries
  topo = topology.MultipatchTopology(tuple(map(topology.Patch, topos, patches, boundarydata)))
  funcsp = topo.basis('spline', degree=1, patchcontinuous=False)
  geom = (funcsp * numpy.concatenate(coords, axis=1)).sum(-1)
  return topo, geom
@cache.function
def parsegmsh(mshdata):
  """Gmsh parser

  Parser for Gmsh data in ``msh2`` or ``msh4`` format. See the `Gmsh manual
  <http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details.

  Parameters
  ----------
  mshdata : :class:`io.BufferedIOBase`
      Msh file contents.

  Returns
  -------
  :class:`dict`:
      Keyword arguments for :func:`simplex`
  """

  # meshio performs the low level file parsing; it is an optional dependency.
  try:
    from meshio import gmsh
  except ImportError as e:
    raise Exception('parsegmsh requires the meshio module to be installed') from e

  msh = gmsh.main.read_buffer(mshdata)

  if not msh.cell_sets:
    # Old versions of the gmsh file format repeat elements that have multiple
    # tags. To support this we edit the meshio data to bring it in the same
    # form as the new files by deduplicating cells and creating cell_sets.
    renums = []
    for icell, cells in enumerate(msh.cells):
      # keep[i] is False when row i+1 repeats row i (duplicated element).
      keep = (cells.data[1:] != cells.data[:-1]).any(axis=1)
      if keep.all():
        renum = numpy.arange(len(cells.data))
      else:
        msh.cells[icell] = cells._replace(data=cells.data[numpy.hstack([True, keep])])
        renum = numpy.hstack([0, keep.cumsum()])
      renums.append(renum)
    for name, (itag, nd) in msh.field_data.items():
      # Recreate per-cell-block selections from the 'gmsh:physical' tags.
      msh.cell_sets[name] = [renum[data == itag] for data, renum in zip(msh.cell_data['gmsh:physical'], renums)]

  # Coords is a 2d float-array such that coords[inode,idim] == coordinate.
  coords = msh.points

  # Nodes is a dictionary that maps a topological dimension to a 2d int-array
  # dictionary such that nodes[nd][ielem,ilocal] == inode, where ilocal < nd+1
  # for linear geometries or larger for higher order geometries. Since meshio
  # stores nodes by simplex type and cell, simplex types are mapped to
  # dimensions and gathered, after which cells are concatenated under the
  # assumption that there is only one simplex type per dimension.
  nodes = {('ver','lin','tri','tet').index(typename[:3]): numpy.concatenate(datas, axis=0)
    for typename, datas in util.gather((cells.type, cells.data) for cells in msh.cells)}

  # Identities is a 2d [master, slave] int-aray that pairs matching nodes on
  # periodic walls. For the topological connectivity, all slaves in the nodes
  # arrays will be replaced by their master counterpart.
  identities = numpy.zeros((0, 2), dtype=int) if not msh.gmsh_periodic \
    else numpy.concatenate([d for a, b, c, d in msh.gmsh_periodic], axis=0)

  # Tags is a list of (nd, name, ndelems) tuples that define topological groups
  # per dimension. Since meshio associates group names with cells, which are
  # concatenated in nodes, element ids are offset and concatenated to match.
  tags = [(msh.field_data[name][1], name, numpy.concatenate([selection
    + sum(len(cells.data) for cells in msh.cells[:icell] if cells.type == msh.cells[icell].type) # offset into nodes
      for icell, selection in enumerate(selections)]))
        for name, selections in msh.cell_sets.items()]

  # determine the dimension of the topology
  ndims = max(nodes)

  # determine the dimension of the geometry
  assert not numpy.isnan(coords).any()
  # drop trailing all-zero coordinate axes (e.g. a 2d mesh stored with z == 0)
  while coords.shape[1] > ndims and not coords[:,-1].any():
    coords = coords[:,:-1]

  # separate geometric, topological nodes
  cnodes = nodes[ndims]
  if cnodes.shape[1] > ndims+1: # higher order geometry
    nodes = {nd: n[:,:nd+1] for nd, n in nodes.items()} # remove high order info

  if len(identities):
    # Collapse periodic slave nodes onto their masters and renumber the
    # remaining nodes consecutively, so connectivity sees identified walls.
    slaves, masters = identities.T
    keep = numpy.ones(len(coords), dtype=bool)
    keep[slaves] = False
    assert keep[masters].all()
    renumber = keep.cumsum()-1
    renumber[slaves] = renumber[masters]
    nodes = {nd: renumber[n] for nd, n in nodes.items()}

  vnodes = nodes[ndims]
  bnodes = nodes.get(ndims-1)
  pnodes = nodes.get(0)

  if cnodes is vnodes: # geometry is linear and non-periodic, dofs follow in-place sorting of nodes
    degree = 1
  elif cnodes.shape[1] == ndims+1: # linear elements: match sorting of nodes
    degree = 1
    shuffle = vnodes.argsort(axis=1)
    cnodes = cnodes[numpy.arange(len(cnodes))[:,_], shuffle] # gmsh conveniently places the primary ndim+1 vertices first
  else: # higher order elements: match sorting of nodes and renumber higher order coefficients
    degree, nodeorder = { # for meshio's node ordering conventions see http://www.vtk.org/VTK/img/file-formats.pdf
      (2, 6): (2, (0,3,1,5,4,2)),
      (2,10): (3, (0,3,4,1,8,9,5,7,6,2)),
      (2,15): (4, (0,3,4,5,1,11,12,13,6,10,14,7,9,8,2)),
      (3,10): (2, (0,4,1,6,5,2,7,8,9,3))}[ndims, cnodes.shape[1]]
    enum = numpy.empty([degree+1]*(ndims+1), dtype=int)
    bari = tuple(numpy.array([index[::-1] for index in numpy.ndindex(*enum.shape) if sum(index) == degree]).T)
    enum[bari] = numpy.arange(cnodes.shape[1]) # maps baricentric index to corresponding enumerated index
    shuffle = vnodes.argsort(axis=1)
    cnodes = cnodes[:,nodeorder] # convert from gmsh to nutils order
    for i in range(ndims): # strategy: apply shuffle to cnodes by sequentially swapping vertices...
      for j in range(i+1, ndims+1): # ...considering all j > i pairs...
        m = shuffle[:,i] == j # ...and swap vertices if vertex j is shuffled into i...
        r = enum.swapaxes(i,j)[bari] # ...using the enum table to generate the appropriate renumbering
        cnodes[m,:] = cnodes[numpy.ix_(m,r)]
        m = shuffle[:,j] == i
        shuffle[m,j] = shuffle[m,i] # update shuffle to track changed vertex positions

  # sort nodes along the second axis; required by simplex()
  vnodes.sort(axis=1)
  nnodes = vnodes[:,-1].max()+1

  vtags, btags, ptags = {}, {}, {}
  edge_vertices = numpy.arange(ndims+1).repeat(ndims).reshape(ndims, ndims+1)[:,::-1].T # nedges x ndims
  for nd, name, ielems in tags:
    if nd == ndims:
      vtags[name] = numpy.array(ielems)
    elif nd == ndims-1:
      # Boundary tag: map tagged edge simplices back to (element, edge) pairs.
      edgenodes = bnodes[ielems]
      nodemask = numeric.asboolean(edgenodes.ravel(), size=nnodes, ordered=False)
      ielems, = (nodemask[vnodes].sum(axis=1) >= ndims).nonzero() # all elements sharing at least ndims edgenodes
      edgemap = {tuple(b): (ielem, iedge) for ielem, a in zip(ielems, vnodes[ielems[:,_,_], edge_vertices[_,:,:]]) for iedge, b in enumerate(a)}
      btags[name] = numpy.array([edgemap[tuple(sorted(n))] for n in edgenodes])
    elif nd == 0:
      ptags[name] = pnodes[ielems][...,0]

  log.info('\n- '.join(['loaded {}d gmsh topology consisting of #{} elements'.format(ndims, len(cnodes))]
    + [name + ' groups: ' + ', '.join('{} #{}'.format(n, len(e)) for n, e in tags.items())
      for name, tags in (('volume', vtags), ('boundary', btags), ('point', ptags)) if tags]))

  return dict(nodes=vnodes, cnodes=cnodes, coords=coords, tags=vtags, btags=btags, ptags=ptags)
@log.withcontext
@types.apply_annotations
def gmsh(fname:util.binaryfile, name='gmsh'):
  '''Construct a simplex topology from a Gmsh ``.msh`` file.

  Thin convenience wrapper: the file contents are parsed by
  :func:`parsegmsh` and the resulting arrays are forwarded to
  :func:`simplex`. Only files with physical groups are supported. See the
  `Gmsh manual <http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details
  on the file format.

  Parameters
  ----------
  fname : :class:`str` or :class:`io.BufferedIOBase`
      Path to mesh file or mesh file object.
  name : :class:`str` or :any:`None`
      Name of parsed topology, defaults to 'gmsh'.

  Returns
  -------
  topo : :class:`nutils.topology.SimplexTopology`
      Topology of parsed Gmsh file.
  geom : :class:`nutils.function.Array`
      Isoparametric map.
  '''

  with fname as mshfile:
    parsed = parsegmsh(mshfile)
  return simplex(name=name, **parsed)
def simplex(nodes, cnodes, coords, tags, btags, ptags, name='simplex'):
  '''Simplex topology.

  Parameters
  ----------
  nodes : :class:`numpy.ndarray`
      Vertex indices as (nelems x ndims+1) integer array, sorted along the
      second dimension. This table fully determines the connectivity of the
      simplices.
  cnodes : :class:`numpy.ndarray`
      Coordinate indices as (nelems x ncnodes) integer array following Nutils'
      conventions for Bernstein polynomials. The polynomial degree is inferred
      from the array shape.
  coords : :class:`numpy.ndarray`
      Coordinates as (nverts x ndims) float array to be indexed by ``cnodes``.
  tags : :class:`dict`
      Dictionary of name->element numbers. Element order is preserved in the
      resulting volumetric groups.
  btags : :class:`dict`
      Dictionary of name->edges, where edges is a (nedges x 2) integer array
      containing pairs of element number and edge number. The segments are
      assigned to boundary or interfaces groups automatically while otherwise
      preserving order.
  ptags : :class:`dict`
      Dictionary of name->node numbers referencing the ``nodes`` table.
  name : :class:`str`
      Name of simplex topology.

  Returns
  -------
  topo : :class:`nutils.topology.SimplexTopology`
      Topology with volumetric, boundary and interface groups.
  geom : :class:`nutils.function.Array`
      Geometry function.
  '''

  nverts = len(coords)
  nelems, ncnodes = cnodes.shape
  ndims = nodes.shape[1] - 1
  assert len(nodes) == nelems
  assert numpy.greater(nodes[:,1:], nodes[:,:-1]).all(), 'nodes must be sorted'

  # Infer the geometry degree from the number of coordinate nodes per element
  # and extract the vertex subset of cnodes for the interface check below.
  if ncnodes == ndims+1:
    degree = 1
    vnodes = cnodes
  else:
    degree = int((ncnodes * math.factorial(ndims))**(1/ndims))-1 # degree**ndims/ndims! < ncnodes < (degree+1)**ndims/ndims!
    dims = numpy.arange(ndims)
    strides = (dims+1+degree).cumprod() // (dims+1).cumprod() # (i+1+degree)!/(i+1)!
    assert strides[-1] == ncnodes
    # positions of the primary vertices within the Bernstein ordering
    vnodes = cnodes[:,(0,*strides-1)]

  assert vnodes.shape == nodes.shape

  # Base topology and isoparametric geometry via a Lagrange basis on cnodes.
  transforms = transformseq.IdentifierTransforms(ndims=ndims, name=name, length=nelems)
  topo = topology.SimplexTopology(nodes, transforms, transforms)
  coeffs = element.getsimplex(ndims).get_poly_coeffs('lagrange', degree=degree)
  basis = function.PlainBasis([coeffs] * nelems, cnodes, nverts, topo.transforms)
  geom = (basis[:,_] * coords).sum(0)

  # connectivity[ielem, iedge] is the neighbouring element, or -1 at the
  # mesh boundary.
  connectivity = topo.connectivity

  # Split the tagged edges into boundary groups (no neighbour) and interface
  # groups (edge shared between two elements).
  bgroups = {}
  igroups = {}
  for name, elems_edges in btags.items():
    bitems = [], [], None  # boundary edges carry no opposites
    iitems = [], [], []
    for ielem, iedge in elems_edges:
      ioppelem = connectivity[ielem, iedge]
      simplices, transforms, opposites = bitems if ioppelem == -1 else iitems
      simplices.append(tuple(nodes[ielem][:iedge])+tuple(nodes[ielem][iedge+1:]))
      transforms.append(topo.transforms[ielem] + (transform.SimplexEdge(ndims, iedge),))
      if opposites is not None:
        opposites.append(topo.transforms[ioppelem] + (transform.SimplexEdge(ndims, tuple(connectivity[ioppelem]).index(ielem)),))
    for groups, (simplices, transforms, opposites) in (bgroups, bitems), (igroups, iitems):
      if simplices:
        transforms = transformseq.PlainTransforms(transforms, ndims-1)
        opposites = transforms if opposites is None else transformseq.PlainTransforms(opposites, ndims-1)
        groups[name] = topology.SimplexTopology(simplices, transforms, opposites)

  # Point groups: locate every (element, local vertex) pair for each tagged
  # node and attach a zero-dimensional transform per occurrence.
  pgroups = {}
  if ptags:
    ptrans = [transform.Matrix(linear=numpy.zeros(shape=(ndims,0)), offset=offset) for offset in numpy.eye(ndims+1)[:,1:]]
    pmap = {inode: numpy.array(numpy.equal(nodes, inode).nonzero()).T for inode in set.union(*map(set, ptags.values()))}
    for pname, inodes in ptags.items():
      ptransforms = transformseq.PlainTransforms([topo.transforms[ielem] + (ptrans[ivertex],) for inode in inodes for ielem, ivertex in pmap[inode]], 0)
      preferences = References.uniform(element.getsimplex(0), len(ptransforms))
      pgroups[pname] = topology.Topology(preferences, ptransforms, ptransforms)

  # Volumetric groups: each tag selects a subset of elements; boundary,
  # interface and point groups are restricted to the selection accordingly.
  vgroups = {}
  for name, ielems in tags.items():
    if len(ielems) == nelems and numpy.equal(ielems, numpy.arange(nelems)).all():
      # tag covers the full topology; reuse the global groups
      vgroups[name] = topo.withgroups(bgroups=bgroups, igroups=igroups, pgroups=pgroups)
      continue
    transforms = topo.transforms[ielems]
    vtopo = topology.SimplexTopology(nodes[ielems], transforms, transforms)
    keep = numpy.zeros(nelems, dtype=bool)
    keep[ielems] = True
    vbgroups = {}
    vigroups = {}
    for bname, elems_edges in btags.items():
      bitems = [], [], []
      iitems = [], [], []
      for ielem, iedge in elems_edges:
        ioppelem = connectivity[ielem, iedge]
        if ioppelem == -1:
          keepopp = False
        else:
          keepopp = keep[ioppelem]
          ioppedge = tuple(connectivity[ioppelem]).index(ielem)
        if keepopp and keep[ielem]:
          # both sides selected: edge is an interface of the sub-topology
          simplices, transforms, opposites = iitems
        elif keepopp or keep[ielem]:
          # exactly one side selected: edge becomes a boundary
          simplices, transforms, opposites = bitems
          if keepopp:
            # orient the edge from the kept side
            ielem, iedge, ioppelem, ioppedge = ioppelem, ioppedge, ielem, iedge
        else:
          continue
        simplices.append(tuple(nodes[ielem][:iedge])+tuple(nodes[ielem][iedge+1:]))
        transforms.append(topo.transforms[ielem] + (transform.SimplexEdge(ndims, iedge),))
        if ioppelem != -1:
          opposites.append(topo.transforms[ioppelem] + (transform.SimplexEdge(ndims, ioppedge),))
      for groups, (simplices, transforms, opposites) in (vbgroups, bitems), (vigroups, iitems):
        if simplices:
          transforms = transformseq.PlainTransforms(transforms, ndims-1)
          opposites = transformseq.PlainTransforms(opposites, ndims-1) if len(opposites) == len(transforms) else transforms
          groups[bname] = topology.SimplexTopology(simplices, transforms, opposites)
    vpgroups = {}
    for pname, inodes in ptags.items():
      ptransforms = transformseq.PlainTransforms([topo.transforms[ielem] + (ptrans[ivertex],) for inode in inodes for ielem, ivertex in pmap[inode] if keep[ielem]], 0)
      preferences = References.uniform(element.getsimplex(0), len(ptransforms))
      vpgroups[pname] = topology.Topology(preferences, ptransforms, ptransforms)
    vgroups[name] = vtopo.withgroups(bgroups=vbgroups, igroups=vigroups, pgroups=vpgroups)

  return topo.withgroups(vgroups=vgroups, bgroups=bgroups, igroups=igroups, pgroups=pgroups), geom
def fromfunc(func, nelems, ndims, degree=1):
  '''Topology and geometry from a piecewise coordinate mapping.

  Builds a rectilinear unit-domain topology and projects ``func`` onto a
  spline basis of the given degree to obtain the geometry.
  '''

  counts = [nelems] if isinstance(nelems, int) else nelems
  assert len(counts) == func.__code__.co_argcount
  verts = [numpy.linspace(0, 1, count+1) for count in counts]
  topo, ref = rectilinear(verts)
  basis = topo.basis('spline', degree=degree).vector(ndims)
  coords = topo.projection(func, onto=basis, coords=ref, exact_boundaries=True)
  return topo, coords
def unitsquare(nelems, etype):
  '''Unit square mesh.

  Args
  ----
  nelems : :class:`int`
      Number of elements along boundary
  etype : :class:`str`
      Type of element used for meshing. Supported are:

      * ``"square"``: structured mesh of squares.
      * ``"triangle"``: unstructured mesh of triangles.
      * ``"mixed"``: unstructured mesh of triangles and squares.

  Returns
  -------
  :class:`nutils.topology.Topology`:
      The structured/unstructured topology.
  :class:`nutils.function.Array`:
      The geometry function.
  '''

  root = transform.Identifier(2, 'unitsquare')

  if etype == 'square':
    topo = topology.StructuredTopology(root, [transformseq.DimAxis(0, nelems, False)] * 2)

  elif etype in ('triangle', 'mixed'):
    # Split every cell of an (nelems x nelems) vertex grid into two triangles,
    # alternating the diagonal in a checkerboard pattern (i%2 == j%2).
    simplices = numpy.concatenate([
      numpy.take([i*(nelems+1)+j, i*(nelems+1)+j+1, (i+1)*(nelems+1)+j, (i+1)*(nelems+1)+j+1], [[0,1,2],[1,2,3]] if i%2==j%2 else [[0,1,3],[0,2,3]], axis=0)
      for i in range(nelems) for j in range(nelems)])

    v = numpy.arange(nelems+1, dtype=float)
    coords = numeric.meshgrid(v, v).reshape(2,-1).T
    transforms = transformseq.PlainTransforms([(root, transform.Square((c[1:]-c[0]).T, c[0])) for c in coords[simplices]], 2)
    topo = topology.SimplexTopology(simplices, transforms, transforms)

    if etype == 'mixed':
      # Replace a subset of triangle pairs (cells with i%2 == j%3) by square
      # elements, patching references, transforms and connectivity in place.
      references = list(topo.references)
      transforms = list(topo.transforms)
      square = element.getsimplex(1)**2
      connectivity = list(topo.connectivity)
      isquares = [i * nelems + j for i in range(nelems) for j in range(nelems) if i%2==j%3]
      # iterate in reverse so earlier (smaller) indices stay valid while
      # triangle pairs collapse into single square entries
      for n in sorted(isquares, reverse=True):
        i, j = divmod(n, nelems)
        references[n*2:(n+1)*2] = square,
        transforms[n*2:(n+1)*2] = (root, transform.Shift([float(i),float(j)])),
        connectivity[n*2:(n+1)*2] = numpy.concatenate(connectivity[n*2:(n+1)*2])[[3,2,4,1] if i%2==j%2 else [3,2,0,5]],
        # shift neighbour indices past the removed second triangle
        connectivity = [c-numpy.greater(c,n*2) for c in connectivity]
      topo = topology.ConnectedTopology(References.from_iter(references, 2), transformseq.PlainTransforms(transforms, 2),transformseq.PlainTransforms(transforms, 2), tuple(types.frozenarray(c, copy=False) for c in connectivity))

    # Classify boundary edges by the coordinates of their midpoints.
    x, y = topo.boundary.elem_mean(function.rootcoords(2), degree=1).T
    bgroups = dict(left=x==0, right=x==nelems, bottom=y==0, top=y==nelems)
    topo = topo.withboundary(**{name: topo.boundary[numpy.where(mask)[0]] for name, mask in bgroups.items()})

  else:
    raise Exception('invalid element type {!r}'.format(etype))

  # scale the root coordinates from [0,nelems]^2 down to the unit square
  return topo, function.rootcoords(2) / nelems
# vim:sw=2:sts=2:et
|
wijnandhoitinga/nutils
|
nutils/mesh.py
|
Python
|
mit
| 29,584
|
[
"VTK"
] |
7f2dfeec4c8726bcc8b28cc444d1fa412ec237f1b31294bf184cdb767736a2ec
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Sławek Piotrowski <sentinel@atteo.org>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
    """Protocol decoder for the HopeRF RFM12 wireless transceiver.

    Consumes SPI 'DATA'/'BITS' packets and annotates every 16-bit command
    word sent to the transceiver (MOSI) as well as the values returned by
    it (MISO). Mirrors of the chip's configuration registers are kept
    (initialized to power-on-reset defaults) so that changes relative to
    the previously seen state can be highlighted on the interpretation row.
    """
    api_version = 2
    id = 'rfm12'
    name = 'RFM12'
    longname = 'RFM12 control protocol'
    desc = 'HopeRF RFM12 wireless transceiver control protocol.'
    license = 'gplv2+'
    inputs = ['spi']
    outputs = ['rfm12']
    annotations = (
        ('cmd', 'Command'),
        ('params', 'Command parameters'),
        ('disabled', 'Disabled bits'),
        ('return', 'Returned values'),
        ('disabled_return', 'Disabled returned values'),
        ('interpretation', 'Interpretation'),
    )
    annotation_rows = (
        ('commands', 'Commands', (0, 1, 2)),
        ('return', 'Return', (3, 4)),
        ('interpretation', 'Interpretation', (5,)),
    )

    def __init__(self):
        # Bytes/bits accumulated for the 2-byte transfer in progress.
        self.mosi_bytes, self.miso_bytes = [], []
        self.mosi_bits, self.miso_bits = [], []
        # Current bit position per annotation row.
        self.row_pos = [0, 0, 0]
        # Maps an annotation class to the row whose position counter it uses.
        self.ann_to_row = [0, 0, 0, 1, 1, 2]
        # Initialize with Power-On-Reset values.
        self.last_status = [0x00, 0x00]
        self.last_config = 0x08
        self.last_power = 0x08
        self.last_freq = 0x680
        self.last_data_rate = 0x23
        self.last_fifo_and_reset = 0x80
        self.last_afc = 0xF7
        self.last_transceiver = 0x00
        self.last_pll = 0x77

    def advance_ann(self, ann, length):
        """Skip 'length' bits on the row used by annotation class 'ann'."""
        row = self.ann_to_row[ann]
        self.row_pos[row] += length

    def putx(self, ann, length, description):
        """Annotate the next 'length' bits on 'ann's row with 'description'."""
        if not isinstance(description, list):
            description = [description]
        row = self.ann_to_row[ann]
        bit = self.row_pos[row]
        # Sample numbers are taken from the recorded MOSI bit positions.
        self.put(self.mosi_bits[bit][1], self.mosi_bits[bit + length - 1][2],
                 self.out_ann, [ann, description])
        bit += length
        self.row_pos[row] = bit

    def describe_bits(self, data, names):
        """Annotate individual command bits, MSB first.

        Set bits use annotation class 1, cleared bits class 2.
        """
        i = 0x01 << (len(names) - 1)
        bit = 0
        while i != 0:
            if names[bit] != '':
                self.putx(1 if (data & i) else 2, 1, names[bit])
            i >>= 1
            bit += 1

    def describe_return_bits(self, data, names):
        """Annotate individual returned bits, MSB first (class 3 set, 4 clear)."""
        i = 0x01 << (len(names) - 1)
        bit = 0
        while i != 0:
            if names[bit] != '':
                self.putx(3 if (data & i) else 4, 1, names[bit])
            else:
                # Unnamed bit: keep the row position in sync.
                self.advance_ann(3, 1)
            i >>= 1
            bit += 1

    def describe_changed_bits(self, data, old_data, names):
        """Annotate which named bits differ from the previously seen value."""
        changes = data ^ old_data
        i = 0x01 << (len(names) - 1)
        bit = 0
        while i != 0:
            if names[bit] != '' and changes & i:
                s = ['+', 'Turning on'] if (data & i) else ['-', 'Turning off']
                self.putx(5, 1, s)
            else:
                self.advance_ann(5, 1)
            i >>= 1
            bit += 1

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def handle_configuration_cmd(self, cmd, ret):
        self.putx(0, 8, ['Configuration command', 'Configuration'])
        NAMES = [['Internal data register', 'el'], ['FIFO mode', 'ef']]
        bits = (cmd[1] & 0xC0) >> 6
        old_bits = (self.last_config & 0xC0) >> 6
        self.describe_bits(bits, NAMES)
        self.describe_changed_bits(bits, old_bits, NAMES)
        FREQUENCIES = ['315', '433', '868', '915']
        f = FREQUENCIES[(cmd[1] & 0x30) >> 4] + 'MHz'
        self.putx(1, 2, ['Frequency: ' + f, f])
        if (cmd[1] & 0x30) != (self.last_config & 0x30):
            self.putx(5, 2, ['Changed', '~'])
        # Crystal load capacitance: 8.5pF + 0.5pF per step.
        c = '%.1fpF' % (8.5 + (cmd[1] & 0xF) * 0.5)
        self.putx(1, 4, ['Capacitance: ' + c, c])
        if (cmd[1] & 0xF) != (self.last_config & 0xF):
            self.putx(5, 4, ['Changed', '~'])
        self.last_config = cmd[1]

    def handle_power_management_cmd(self, cmd, ret):
        self.putx(0, 8, ['Power management', 'Power'])
        NAMES = [['Receiver chain', 'er'], ['Baseband circuit', 'ebb'],
                 ['Transmission', 'et'], ['Synthesizer', 'es'],
                 ['Crystal oscillator', 'ex'], ['Low battery detector', 'eb'],
                 ['Wake-up timer', 'ew'], ['Clock output off switch', 'dc']]
        self.describe_bits(cmd[1], NAMES)
        power = cmd[1]
        # Some bits imply other, even if they are set to 0.
        if power & 0x80:
            power |= 0x58
        if power & 0x20:
            power |= 0x18
        self.describe_changed_bits(power, self.last_power, NAMES)
        self.last_power = power

    def handle_frequency_setting_cmd(self, cmd, ret):
        self.putx(0, 4, ['Frequency setting', 'Frequency'])
        # 12-bit frequency parameter F spans the low nibble plus second byte.
        f = ((cmd[1] & 0xF) << 8) + cmd[2]
        self.putx(0, 12, ['F = %3.4f' % f])
        # Re-align the interpretation row to cover the 12 value bits.
        self.row_pos[2] -= 4
        if self.last_freq != f:
            self.putx(5, 12, ['Changing', '~'])
        self.last_freq = f

    def handle_data_rate_cmd(self, cmd, ret):
        self.putx(0, 8, ['Data rate command', 'Data rate'])
        r = cmd[1] & 0x7F
        cs = (cmd[1] & 0x80) >> 7
        # BR = 10000 / 29 / (R+1) / (1 + cs*7) kbps, per datasheet.
        rate = 10000 / 29.0 / (r + 1) / (1 + 7 * cs)
        self.putx(0, 8, ['%3.1fkbps' % rate])
        if self.last_data_rate != cmd[1]:
            self.putx(5, 8, ['Changing', '~'])
        self.last_data_rate = cmd[1]

    def handle_receiver_control_cmd(self, cmd, ret):
        self.putx(0, 5, ['Receiver control command'])
        s = 'interrupt input' if (cmd[0] & 0x04) else 'VDI output'
        self.putx(0, 1, ['pin16 = ' + s])
        VDI_NAMES = ['Fast', 'Medium', 'Slow', 'Always on']
        vdi_speed = VDI_NAMES[cmd[0] & 0x3]
        self.putx(0, 2, ['VDI: %s' % vdi_speed])
        BANDWIDTH_NAMES = ['Reserved', '400kHz', '340kHz', '270kHz', '200kHz',
                           '134kHz', '67kHz', 'Reserved']
        bandwidth = BANDWIDTH_NAMES[(cmd[1] & 0xE0) >> 5]
        self.putx(0, 3, ['Bandwidth: %s' % bandwidth])
        LNA_GAIN_NAMES = [0, -6, -14, -20]
        lna_gain = LNA_GAIN_NAMES[(cmd[1] & 0x18) >> 3]
        self.putx(0, 2, ['LNA gain: %ddB' % lna_gain])
        RSSI_THRESHOLD_NAMES = ['-103', '-97', '-91', '-85', '-79', '-73',
                                'Reserved', 'Reserved']
        rssi_threshold = RSSI_THRESHOLD_NAMES[cmd[1] & 0x7]
        self.putx(0, 3, ['RSSI threshold: %s' % rssi_threshold])

    def handle_data_filter_cmd(self, cmd, ret):
        self.putx(0, 8, ['Data filter command'])
        if cmd[1] & 0x80:
            clock_recovery = 'auto'
        elif cmd[1] & 0x40:
            clock_recovery = 'fast'
        else:
            clock_recovery = 'slow'
        self.putx(0, 2, ['Clock recovery: %s mode' % clock_recovery])
        self.advance_ann(0, 1) # Should always be 1.
        s = 'analog' if (cmd[1] & 0x10) else 'digital'
        self.putx(0, 1, ['Data filter: ' + s])
        self.advance_ann(0, 1) # Should always be 1.
        self.putx(0, 3, ['DQD threshold: %d' % (cmd[1] & 0x7)])

    def handle_fifo_and_reset_cmd(self, cmd, ret):
        self.putx(0, 8, ['FIFO and reset command'])
        fifo_level = (cmd[1] & 0xF0) >> 4
        self.putx(0, 4, ['FIFO trigger level: %d' % fifo_level])
        last_fifo_level = (self.last_fifo_and_reset & 0xF0) >> 4
        if fifo_level != last_fifo_level:
            self.putx(5, 4, ['Changing', '~'])
        else:
            self.advance_ann(5, 4)
        s = 'one byte' if (cmd[1] & 0x08) else 'two bytes'
        self.putx(0, 1, ['Synchron length: ' + s])
        if (cmd[1] & 0x08) != (self.last_fifo_and_reset & 0x08):
            self.putx(5, 1, ['Changing', '~'])
        else:
            self.advance_ann(5, 1)
        if cmd[1] & 0x04:
            fifo_fill = 'Always'
        elif cmd[1] & 0x02:
            fifo_fill = 'After synchron pattern'
        else:
            fifo_fill = 'Never'
        self.putx(0, 2, ['FIFO fill: %s' % fifo_fill])
        if (cmd[1] & 0x06) != (self.last_fifo_and_reset & 0x06):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        s = 'non-sensitive' if (cmd[1] & 0x01) else 'sensitive'
        self.putx(0, 1, ['Reset mode: ' + s])
        if (cmd[1] & 0x01) != (self.last_fifo_and_reset & 0x01):
            self.putx(5, 1, ['Changing', '~'])
        else:
            self.advance_ann(5, 1)
        self.last_fifo_and_reset = cmd[1]

    def handle_synchron_pattern_cmd(self, cmd, ret):
        self.putx(0, 8, ['Synchron pattern command'])
        # Fix: 'pattern' was previously referenced without ever being
        # assigned, raising a NameError at runtime. The programmable
        # pattern byte is the command's low byte.
        pattern = cmd[1]
        if self.last_fifo_and_reset & 0x08:
            # One-byte synchron length: pattern follows the fixed 0x2D byte.
            self.putx(0, 8, ['Pattern: 0x2D%02X' % pattern])
        else:
            self.putx(0, 8, ['Pattern: %02X' % pattern])

    def handle_fifo_read_cmd(self, cmd, ret):
        self.putx(0, 8, ['FIFO read command', 'FIFO read'])
        self.putx(3, 8, ['Data: %02X' % ret[1]])

    def handle_afc_cmd(self, cmd, ret):
        self.putx(0, 8, ['AFC command'])
        MODES = ['Off', 'Once', 'During receiving', 'Always']
        mode = (cmd[1] & 0xC0) >> 6
        self.putx(0, 2, ['Mode: %s' % MODES[mode]])
        if (cmd[1] & 0xC0) != (self.last_afc & 0xC0):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        range_limit = (cmd[1] & 0x30) >> 4
        # The AFC range step depends on the configured frequency band.
        FREQ_TABLE = [0.0, 2.5, 5.0, 7.5]
        freq_delta = FREQ_TABLE[(self.last_config & 0x30) >> 4]
        if range_limit == 0:
            self.putx(0, 2, ['Range: No limit'])
        elif range_limit == 1:
            self.putx(0, 2, ['Range: +/-%dkHz' % (15 * freq_delta)])
        elif range_limit == 2:
            self.putx(0, 2, ['Range: +/-%dkHz' % (7 * freq_delta)])
        elif range_limit == 3:
            self.putx(0, 2, ['Range: +/-%dkHz' % (3 * freq_delta)])
        if (cmd[1] & 0x30) != (self.last_afc & 0x30):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        NAMES = ['Strobe edge', 'High accuracy mode', 'Enable offset register',
                 'Enable offset calculation']
        self.describe_bits(cmd[1] & 0xF, NAMES)
        self.describe_changed_bits(cmd[1] & 0xF, self.last_afc & 0xF, NAMES)
        self.last_afc = cmd[1]

    def handle_transceiver_control_cmd(self, cmd, ret):
        self.putx(0, 8, ['Transceiver control command'])
        self.putx(0, 4, ['FSK frequency delta: %dkHz' % (15 * ((cmd[1] & 0xF0) >> 4))])
        if (cmd[1] & 0xF0) != (self.last_transceiver & 0xF0):
            self.putx(5, 4, ['Changing', '~'])
        else:
            self.advance_ann(5, 4)
        # Output power steps per datasheet: 0, -2.5, ..., -17.5 dB
        # (removed previously unused POWERS lookup table).
        self.advance_ann(0, 1)
        self.advance_ann(5, 1)
        self.putx(0, 3, ['Relative power: %dB' % (cmd[1] & 0x07)])
        if (cmd[1] & 0x07) != (self.last_transceiver & 0x07):
            self.putx(5, 3, ['Changing', '~'])
        else:
            self.advance_ann(5, 3)
        self.last_transceiver = cmd[1]

    def handle_pll_setting_cmd(self, cmd, ret):
        self.putx(0, 8, ['PLL setting command'])
        self.advance_ann(0, 1)
        self.putx(0, 2, ['Clock buffer rise and fall time'])
        self.advance_ann(0, 1)
        self.advance_ann(5, 4)
        NAMES = [['Delay in phase detector', 'dly'], ['Disable dithering', 'ddit']]
        self.describe_bits((cmd[1] & 0xC) >> 2, NAMES)
        self.describe_changed_bits((cmd[1] & 0xC) >> 2, (self.last_pll & 0xC) >> 2, NAMES)
        s = '256kbps, high' if (cmd[1] & 0x01) else '86.2kbps, low'
        self.putx(0, 1, ['Max bit rate: %s noise' % s])
        self.advance_ann(5, 1)
        if (cmd[1] & 0x01) != (self.last_pll & 0x01):
            self.putx(5, 1, ['Changing', '~'])
        self.last_pll = cmd[1]

    def handle_transmitter_register_cmd(self, cmd, ret):
        self.putx(0, 8, ['Transmitter register command', 'Transmit'])
        self.putx(0, 8, ['Data: %s' % cmd[1], '%s' % cmd[1]])

    def handle_software_reset_cmd(self, cmd, ret):
        self.putx(0, 16, ['Software reset command'])

    def handle_wake_up_timer_cmd(self, cmd, ret):
        self.putx(0, 3, ['Wake-up timer command', 'Timer'])
        r = cmd[0] & 0x1F
        m = cmd[1]
        # T_wakeup = 1.03 * M * 2^R + 0.5 ms, per datasheet.
        time = 1.03 * m * pow(2, r) + 0.5
        self.putx(0, 13, ['Time: %7.2f' % time])

    def handle_low_duty_cycle_cmd(self, cmd, ret):
        self.putx(0, 16, ['Low duty cycle command'])

    def handle_low_battery_detector_cmd(self, cmd, ret):
        self.putx(0, 8, ['Low battery detector command'])
        NAMES = ['1', '1.25', '1.66', '2', '2.5', '3.33', '5', '10']
        clock = NAMES[(cmd[1] & 0xE0) >> 5]
        self.putx(0, 3, ['Clock output: %sMHz' % clock, '%sMHz' % clock])
        self.advance_ann(0, 1)
        # Threshold voltage: 2.25V + 0.1V per step.
        v = 2.25 + (cmd[1] & 0x0F) * 0.1
        self.putx(0, 4, ['Low battery voltage: %1.2fV' % v, '%1.2fV' % v])

    def handle_status_read_cmd(self, cmd, ret):
        self.putx(0, 8, ['Status read command', 'Status'])
        NAMES = ['RGIT/FFIT', 'POR', 'RGUR/FFOV', 'WKUP', 'EXT', 'LBD',
                 'FFEM', 'RSSI/ATS', 'DQD', 'CRL', 'ATGL']
        # 11 status bits: the full first byte plus the top 3 bits of the second.
        status = (ret[0] << 3) + (ret[1] >> 5)
        # The status word starts at bit 0; rewind the return and
        # interpretation rows accordingly.
        self.row_pos[1] -= 8
        self.row_pos[2] -= 8
        self.describe_return_bits(status, NAMES)
        # Several bits mean different things in RX and TX mode.
        receiver_enabled = (self.last_power & 0x80) >> 7
        if ret[0] & 0x80:
            if receiver_enabled:
                s = 'Received data in FIFO'
            else:
                s = 'Transmit register ready'
            self.putx(5, 1, s)
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x40:
            self.putx(5, 1, 'Power on Reset')
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x20:
            if receiver_enabled:
                s = 'RX FIFO overflow'
            else:
                s = 'Transmit register under run'
            self.putx(5, 1, s)
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x10:
            self.putx(5, 1, 'Wake-up timer')
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x08:
            self.putx(5, 1, 'External interrupt')
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x04:
            self.putx(5, 1, 'Low battery')
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x02:
            self.putx(5, 1, 'FIFO is empty')
        else:
            self.advance_ann(5, 1)
        if ret[0] & 0x01:
            if receiver_enabled:
                s = 'Incoming signal above limit'
            else:
                s = 'Antenna detected RF signal'
            self.putx(5, 1, s)
        else:
            self.advance_ann(5, 1)
        if ret[1] & 0x80:
            self.putx(5, 1, 'Data quality detector')
        else:
            self.advance_ann(5, 1)
        if ret[1] & 0x40:
            self.putx(5, 1, 'Clock recovery locked')
        else:
            self.advance_ann(5, 1)
        self.advance_ann(5, 1)
        self.putx(3, 5, ['AFC offset'])
        if (self.last_status[1] & 0x1F) != (ret[1] & 0x1F):
            self.putx(5, 5, ['Changed', '~'])
        self.last_status = ret

    def handle_cmd(self, cmd, ret):
        """Dispatch a complete 2-byte transfer to its command handler."""
        if cmd[0] == 0x80:
            self.handle_configuration_cmd(cmd, ret)
        elif cmd[0] == 0x82:
            self.handle_power_management_cmd(cmd, ret)
        elif (cmd[0] & 0xF0) == 0xA0:
            self.handle_frequency_setting_cmd(cmd, ret)
        elif cmd[0] == 0xC6:
            self.handle_data_rate_cmd(cmd, ret)
        elif (cmd[0] & 0xF8) == 0x90:
            self.handle_receiver_control_cmd(cmd, ret)
        elif cmd[0] == 0xC2:
            self.handle_data_filter_cmd(cmd, ret)
        elif cmd[0] == 0xCA:
            self.handle_fifo_and_reset_cmd(cmd, ret)
        elif cmd[0] == 0xCE:
            self.handle_synchron_pattern_cmd(cmd, ret)
        elif cmd[0] == 0xB0:
            self.handle_fifo_read_cmd(cmd, ret)
        elif cmd[0] == 0xC4:
            self.handle_afc_cmd(cmd, ret)
        elif (cmd[0] & 0xFE) == 0x98:
            self.handle_transceiver_control_cmd(cmd, ret)
        elif cmd[0] == 0xCC:
            self.handle_pll_setting_cmd(cmd, ret)
        elif cmd[0] == 0xB8:
            self.handle_transmitter_register_cmd(cmd, ret)
        elif cmd[0] == 0xFE:
            self.handle_software_reset_cmd(cmd, ret)
        elif (cmd[0] & 0xE0) == 0xE0:
            self.handle_wake_up_timer_cmd(cmd, ret)
        elif cmd[0] == 0xC8:
            self.handle_low_duty_cycle_cmd(cmd, ret)
        elif cmd[0] == 0xC0:
            self.handle_low_battery_detector_cmd(cmd, ret)
        elif cmd[0] == 0x00:
            self.handle_status_read_cmd(cmd, ret)
        else:
            c = '%02x %02x' % tuple(cmd)
            r = '%02x %02x' % tuple(ret)
            self.putx(0, 16, ['Unknown command: %s (reply: %s)!' % (c, r)])

    def decode(self, ss, es, data):
        ptype, mosi, miso = data

        # For now, only use DATA and BITS packets.
        if ptype not in ('DATA', 'BITS'):
            return

        # Store the individual bit values and ss/es numbers. The next packet
        # is guaranteed to be a 'DATA' packet belonging to this 'BITS' one.
        if ptype == 'BITS':
            if mosi is not None:
                self.mosi_bits.extend(reversed(mosi))
            if miso is not None:
                self.miso_bits.extend(reversed(miso))
            return

        # Append new bytes.
        self.mosi_bytes.append(mosi)
        self.miso_bytes.append(miso)

        # All commands consist of 2 bytes.
        if len(self.mosi_bytes) < 2:
            return

        self.row_pos = [0, 8, 8]

        self.handle_cmd(self.mosi_bytes, self.miso_bytes)

        self.mosi_bytes, self.miso_bytes = [], []
        self.mosi_bits, self.miso_bits = [], []
|
zeldin/libsigrokdecode
|
decoders/rfm12/pd.py
|
Python
|
gpl-3.0
| 18,323
|
[
"CRYSTAL"
] |
88da09fb3369ae61a9eb38564c34e266cf1bf829ccd31105adc85bb586406f91
|
# Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# standard library
import sys
import string
import copy
import array
import os
import re
import sgmllib
import urlparse
# XML from python 2.0
from xml.sax import handler
# Martel
import Martel
from Martel import RecordReader
from Bio.ParserSupport import EventGenerator
from Bio.ParserSupport import AbstractConsumer
from Bio import File
from Bio.Align.Generic import Alignment
import Bio.Alphabet
import ecell_format
import Record
"""
Ecell converts the ECell input from spreadsheet format to an intermediate format, described in
http://www.e-cell.org/manual/chapter2E.html#3.2. It provides an alternative to the perl script
supplied with the Ecell2 distribution at http://bioinformatics.org/project/?group_id=49.
ECell expects a spreadsheet exported in delimited text format. The file should be read with
FilteredReader using the default filter chain to remove extraneous characters.
"""
class Error( Exception ):
    """Base class for errors raised by this module.

    Takes no constructor arguments; subclasses carry their own state.
    """
    def __init__( self ):
        Exception.__init__( self )
class ECellError( Error ):
    """Module-specific error carrying a human-readable description.

    Attributes:
    o message - description of error
    """
    def __init__( self, message ):
        Error.__init__( self )
        # Expose the description for callers to inspect.
        self.message = message
class Iterator:
    """Iterator interface to move over a file of ecell entries one at a time.
    """
    def __init__(self, handle, parser = None):
        """Initialize the iterator.

        Arguments:
        o handle - A handle with ECell entries to iterate through.
        o parser - An optional parser to pass the entries through before
        returning them. If None, then the raw entry will be returned.
        """
        self.handle = File.UndoHandle( handle )
        self._reader = RecordReader.Everything( self.handle )
        self._parser = parser

    def next(self):
        """Return the next ecell record from the handle.

        Will return None if we ran out of records.
        """
        data = self._reader.next()
        # Fix: leftover debugging code used to write every record to a file
        # named 'dump' in the current working directory on each call,
        # silently clobbering any local file of that name; removed.
        if self._parser is not None and data:
            return self._parser.parse(File.StringHandle(data))
        return data

    def __iter__(self):
        # Iterate by calling next() until it returns None (end of records).
        return iter(self.next, None)
class _Scanner:
    """Start up Martel to do the scanning of the file.

    This initialzes the Martel based parser and connects it to a handler
    that will generate events for a Feature Consumer.
    """
    def __init__(self, debug = 0):
        """Initialize the scanner by setting up our caches.

        Creating the parser takes a long time, so we want to cache it
        to reduce parsing time.

        Arguments:
        o debug - The level of debugging that the parser should
        display. Level 0 is no debugging, Level 2 displays the most
        debugging info (but is much slower). See Martel documentation
        for more info on this.
        """
        # a listing of all tags we are interested in scanning for
        # in the MartelParser; each tag name corresponds to a callback
        # method on the consumer (see _RecordConsumer).
        self.interest_tags = [ 'header_line', 'system_line', 'substance_multiline', \
            'reactor_multiline', 'include_line' ]
        # make a parser that returns only the tags we are interested in
        expression = Martel.select_names( ecell_format.ecell_record, self.interest_tags)
        self._parser = expression.make_parser(debug_level = debug)

    def feed(self, handle, consumer):
        """Feed a set of data into the scanner.

        Arguments:
        o handle - A handle with the information to parse.
        o consumer - The consumer that should be informed of events.
        """
        # EventGenerator translates SAX events into calls on the consumer's
        # methods named after the interest tags.
        self._parser.setContentHandler( EventGenerator(consumer,
            self.interest_tags))
        # self._parser.setErrorHandler(handle.ErrorHandler())
        self._parser.parseFile(handle)
class _RecordConsumer:
"""Create an ECell Record object from scanner generated information.
"""
def __init__(self):
self.data = Record.Record()
self._header = []
self._database = {}
self._state = ''
def include_line( self, line ):
self.data.include_buf = self.data.include_buf + line
def header_line( self, lines ):
for line in lines:
items = line.split( '\t')
items[ 0 ] = items[ 0 ].lower()
self._header = []
self._state = items[ 0 ]
for item in items:
item = item.strip()
self._header.append( item.lower() )
def system_line( self, lines ):
for line in lines:
line_dict = self._make_line_dict( line )
if( not self._check_missing_header( line_dict ) ):
raise EcellError( "invalid header" )
self.data.num_systems = self.data.num_systems + 1
_set_defaults( line_dict )
self._build_system_entry( line_dict )
def substance_multiline( self, multiline ):
for line in multiline:
self.parse_substance_lines( line )
def parse_substance_lines( self, multiline ):
lines = multiline.splitlines()
line_no = 0
for line in lines:
line_dict = self._make_line_dict( line )
try:
if( not _is_valid_substance( line_dict ) ):
raise ECellError( "quantity and concentration are mutually exclusive" )
except ECellError, e:
print sys.stderr, e.message
qty = Record.get_entry( line_dict, 'qty' )
conc = Record.get_entry( line_dict, 'conc' )
if( ( qty.lower() != 'fix' ) and ( conc.lower() != 'fix' ) ):
self.data.num_substances = self.data.num_substances + 1
else:
line_no = line_no + 1
if( line.lower().startswith( 'substance' ) ):
_set_defaults( line_dict )
self._convert_conc( line_dict )
self._build_substance_entry( line_dict, line_no )
def reactor_multiline( self, multiline ):
for line in multiline:
self.parse_reactor_lines( line )
def parse_reactor_lines( self, multiline ):
lines = multiline.splitlines()
for line in lines:
line_dict = self._make_line_dict( line )
if( line.lower().startswith( 'reactor' ) ):
if( not self._check_missing_header( line_dict ) ):
raise ECellError( "invalid header" )
try:
if( not is_only_digits( line_dict[ 's_coeff' ] ) ):
raise ECellError( 's_coeff must contain only digits' )
if( not is_only_digits( line_dict[ 'p_coeff' ] ) ):
raise ECellError( 'p_coeff must contain only digits' )
except KeyError:
pass
if( line.lower().startswith( 'reactor' ) ):
_set_reactor_defaults( line_dict )
line_dict = self._remove_if_inconsistent( line_dict )
if( line_dict.has_key( 'class' ) ):
self.data.num_reactors = self.data.num_reactors + 1
num_substrates = 0
num_products = 0
num_catalysts = 0
num_effectors = 0
num_options = 0
num_args = 0
if( line_dict.has_key( 's_id' ) ): num_substrates = num_substrates + 1
if( line_dict.has_key( 'p_id' ) ): num_products = num_products + 1
if( line_dict.has_key( 'c_id' ) ): num_catalysts = num_catalysts + 1
if( line_dict.has_key( 'e_id' ) ): num_effectors = num_effectors + 1
if( line_dict.has_key( 'o_type' ) ): num_options = num_options + 1
if( line_dict.has_key( 'arg_tag' ) ): num_args = num_args + 1
counter_dict = { \
's_' : num_substrates, \
'p_' : num_products, \
'c_' : num_catalysts, \
'e_' : num_effectors, \
'o_' : num_options, \
'arg_tag' : num_args
}
self._set_max( counter_dict )
self._build_reactor_entry( line_dict, counter_dict )
def _set_max( self, counter_dict ):
num_reactors = self.data.num_reactors
for key in counter_dict.keys():
composite_key = key + str( num_reactors )
self.data._max_dict[ composite_key ] = counter_dict[ key ]
def _build_system_entry( self, line_dict ):
for key in line_dict.keys():
item = line_dict[ key ]
composite_key = 'system' + str( self.data.num_systems ) + key + '0'
if( not self.data.cell_dict.has_key( composite_key ) ):
self.data.cell_dict[ composite_key ] = item
def _build_substance_entry( self, line_dict, line_no ):
for key in line_dict.keys():
item = line_dict[ key ]
composite_key = 'substance' + str( self.data.num_substances ) + key + \
str( line_no )
if( not self.data.cell_dict.has_key( composite_key ) ):
self.data.cell_dict[ composite_key ] = item
def _convert_conc( self, line_dict ):
if( line_dict.has_key( 'conc' ) ):
if( not line_dict.has_key( 'qty' ) ):
contents = 'QTY(%s,%s)' % ( line_dict[ 'conc' ], line_dict[ 'path' ] )
composite_key = 'substance' + str( self.data.num_substances ) + 'qty' + '0'
self.data.cell_dict[ composite_key ] = contents
self.data.contains_concentration = 1
def _build_reactor_entry( self, line_dict, counter_dict ):
for key in line_dict.keys():
item = line_dict[ key ]
prefix = key[ :2 ]
if( key.startswith( 'arg_' ) ):
index = counter_dict[ 'arg_tag' ]
elif( counter_dict.has_key( prefix ) ):
index = counter_dict[ prefix ]
else:
index = '0'
composite_key = 'reactor' + str( self.data.num_reactors ) + str( key ) + str( index )
if( not self.data.cell_dict.has_key( composite_key ) ):
self.data.cell_dict[ composite_key ] = item
def _check_missing_header( self, line_dict ):
ok = 1
items = [ 'id', 'path', 'class' ]
for item in items:
if( line_dict.has_key( item ) == 0 ):
others = copy.deepcopy( items )
others.remove( item )
for other in others:
if( line_dict.has_key( other ) ):
if( item.lower() != 'class' ):
ok = 0
break
return ok
def _remove_if_inconsistent( self, list_dict ):
valid_keys = list_dict.keys()
for label in [ 'id', 'path', 'type' ]:
for prefix in [ 's_', 'p_', 'c_', 'e_' ]:
node = prefix + label
valid_keys = self._consistency_filter( prefix, node, valid_keys )
for key in list_dict.keys():
if( not key in valid_keys ):
del list_dict[ key ]
return list_dict
def _consistency_filter( self, prefix, tag, valid_keys ):
block = []
for suffix in [ 'id', 'path', 'coeff', 'type' ]:
node = prefix + suffix
block.append( node )
for node in block:
if( ( not tag in valid_keys ) and ( node in valid_keys ) ):
if( ( prefix == 'o_' ) or ( not tag.endswith( 'type' ) ) ):
valid_keys.remove( node )
return valid_keys
def _make_line_dict( self, line ):
line_dict = {}
items = line.split( '\t' )
num = 0
for item in items:
item = item.strip()
if( item != '' ):
line_dict[ self._header[ num ] ] = item
num = num + 1
return line_dict
def _clear_bad_block( block, items ):
for label in block:
items = items.remove( items.index( label ) )
return items
def _is_valid_substance( line_dict ):
ok = 1
if( line_dict.has_key( 'qty' ) and line_dict.has_key( 'conc' ) ):
if( not ( line_dict[ 'qty' ] == 'QTY' ) ):
ok = 0
return ok
def is_only_digits( line ):
    """Return 1 if the stripped line is empty or all decimal digits, else 0."""
    text = line.strip()
    if text and not text.isdigit():
        return 0
    return 1
def _set_reactor_defaults( line_dict ):
    """Fill in reactor-line defaults and return the dict.

    After the generic defaults (_set_defaults), each present '<p>id' column
    ('s_', 'p_', 'c_', 'e_') gets a default coefficient of 1 and inherits
    the line's 'path' when its own path column is absent.
    Uses 'in' rather than dict.has_key(); renamed the local that shadowed
    the builtin id().
    """
    line_dict = _set_defaults( line_dict )
    for item in [ 's_', 'p_', 'c_', 'e_' ]:
        id_key = item + 'id'
        coeff = item + 'coeff'
        path = item + 'path'
        if( id_key in line_dict ):
            if( coeff not in line_dict ):
                line_dict[ coeff ] = 1
            if( path not in line_dict ):
                line_dict[ path ] = line_dict[ 'path' ]
    return( line_dict )
def _set_defaults( line_dict ):
if( not line_dict.has_key( 'name' ) ):
line_dict[ 'name' ] = line_dict[ 'id' ]
if( line_dict.has_key( 'arg_tag' ) ):
if( not line_dict.has_key( 'arg_coeff' ) ):
line_dict[ 'arg_coeff' ] = 0
return( line_dict )
class RecordParser:
    """Parse ECell files into Record objects
    """
    def __init__(self, debug_level = 0):
        """Initialize the parser.

        Arguments:
        o debug_level - An optional argument that specifies the amount of
        debugging information Martel should spit out. By default we have
        no debugging info (the fastest way to do things), but if you want
        you can set this as high as two and see exactly where a parse fails.
        """
        self._scanner = _Scanner(debug_level)

    def parse(self, handle):
        """Parse the specified handle into an ECell record.

        A fresh _RecordConsumer is created per call, so the returned
        Record is independent of earlier parses.
        """
        self._consumer = _RecordConsumer()
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/ECell/__init__.py
|
Python
|
apache-2.0
| 14,130
|
[
"Biopython"
] |
ac4878ac8fcbb41c99aac55f4dffcdd43583e437db701b537d6628b930c369c2
|
from spectral_cube import SpectralCube
from astropy.io import fits
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
from scipy.optimize import curve_fit
from scipy import *
import time
import pprocess
from astropy.convolution import convolve
import radio_beam
import sys
def gauss_fitter(region = 'Cepheus_L1251', snr_min = 3.0, mol = 'C2S', vmin = 5.0, vmax=10.0, convolve=False, use_old_conv=False, multicore = 1, file_extension = None):
"""
Fit a Gaussian to non-NH3 emission lines from GAS.
It creates a cube for the best-fit Gaussian, a cube
for the best-fit Gaussian with noise added back into
the spectrum, and a parameter map of Tpeak, Vlsr, and FWHM
Parameters
----------
region : str
Name of region to reduce
snr_min : float
Lowest signal-to-noise pixels to include in the line-fitting
mol : str
name of molecule to fit
vmin : numpy.float
Minimum centroid velocity, in km/s.
vmax : numpy.float
Maximum centroid velocity, in km/s.
convolve : bool or float
If not False, specifies the beam-size to convolve the original map with
Beam-size must be given in arcseconds
use_old_conv : bool
If True, use an already convolved map with name:
region + '_' + mol + file_extension + '_conv.fits'
This convolved map must be in units of km/s
multicore : int
Maximum number of simultaneous processes desired
file_extension: str
filename extension
"""
if file_extension:
root = file_extension
else:
# root = 'base{0}'.format(blorder)
root = 'all'
molecules = ['C2S', 'HC7N_22_21', 'HC7N_21_20', 'HC5N']
MolFile = '{0}/{0}_{2}_{1}.fits'.format(region,root,mol)
ConvFile = '{0}/{0}_{2}_{1}_conv.fits'.format(region,root,mol)
GaussOut = '{0}/{0}_{2}_{1}_gauss_cube.fits'.format(region,root,mol)
GaussNoiseOut = '{0}/{0}_{2}_{1}_gauss_cube_noise.fits'.format(region,root,mol)
ParamOut = '{0}/{0}_{2}_{1}_param_cube.fits'.format(region,root,mol)
# Load the spectral cube and convert to velocity units
cube = SpectralCube.read(MolFile)
cube_km = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
# If desired, convolve map with larger beam
# or load previously created convolved cube
if convolve:
cube = SpectralCube.read(MolFile)
cube_km_1 = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
beam = radio_beam.Beam(major=convolve*u.arcsec, minor=convolve*u.arcsec, pa=0*u.deg)
cube_km = cube_km_1.convolve_to(beam)
cube_km.write(ConvFile, format='fits', overwrite=True)
if use_old_conv:
cube_km = SpectralCube.read(ConvFile)
# Define the spectral axis in km/s
spectra_x_axis_kms = np.array(cube_km.spectral_axis)
# Find the channel range corresponding to vmin and vmax
# -- This is a hold-over from when I originally set up the code to
# use a channel range rather than velocity range.
# Can change later, but this should work for now.
low_channel = np.where(spectra_x_axis_kms<=vmax)[0][0]+1 # Add ones to change index to channel
high_channel = np.where(spectra_x_axis_kms>=vmin)[0][-1]+1 # Again, hold-over from older setup
peak_channels = [low_channel, high_channel]
# Create cubes for storing the fitted Gaussian profiles
# and the Gaussians with noise added back into the spectrum
header = cube_km.header
cube_gauss = np.array(cube_km.unmasked_data[:,:,:])
cube_gauss_noise = np.array(cube_km.unmasked_data[:,:,:])
shape = np.shape(cube_gauss)
# Set up a cube for storing fitted parameters
param_cube = np.zeros(6, shape[1], shape[2])
param_header = cube_km.header
# Define the Gaussian profile
def p_eval(x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
# Create some arrays full of NANs
# To be used in output cubes if fits fail
nan_array=np.empty(shape[0]) # For gauss cubes
nan_array[:] = np.NAN
nan_array2=np.empty(param_cube.shape[0]) # For param cubes
nan_array2[:] = np.NAN
# Loop through each pixel and find those
# with SNR above snr_min
x = []
y = []
pixels = 0
for (i,j), value in np.ndenumerate(cube_gauss[0]):
spectra=np.array(cube_km.unmasked_data[:,i,j])
if (False in np.isnan(spectra)):
rms = np.nanstd(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
if (max(spectra[peak_channels[0]:peak_channels[1]]) / rms) > snr_min:
pixels+=1
x.append(i)
y.append(j)
else:
cube_gauss[:,i,j]=nan_array
param_cube[:,i,j]=nan_array2
cube_gauss_noise[:,i,j]=nan_array
print str(pixels) + ' Pixels above SNR=' + str(snr_min)
# Define a Gaussian fitting function for each pixel
# i, j are the x,y coordinates of the pixel being fit
def pix_fit(i,j):
spectra = np.array(cube_km.unmasked_data[:,i,j])
# Use the peak brightness Temp within specified channel
# range as the initial guess for Gaussian height
max_ch = np.argmax(spectra[peak_channels[0]:peak_channels[1]])
Tpeak = spectra[peak_channels[0]:peak_channels[1]][max_ch]
# Use the velocity of the brightness Temp peak as
# initial guess for Gaussian mean
vpeak = spectra_x_axis_kms[peak_channels[0]:peak_channels[1]][max_ch]
rms = np.std(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
err1 = np.zeros(shape[0])+rms
# Create a noise spectrum based on rms of off-line channels
# This will be added to best-fit Gaussian to obtain a noisy Gaussian
noise=np.random.normal(0.,rms,len(spectra_x_axis_kms))
# Define initial guesses for Gaussian fit
guess = [Tpeak, vpeak, 0.3] # [height, mean, sigma]
try:
coeffs, covar_mat = curve_fit(p_eval, xdata=spectra_x_axis_kms, ydata=spectra, p0=guess, sigma=err1, maxfev=500)
gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))
noisy_gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))+noise
params = np.append(coeffs, (covar_mat[0][0]**0.5, covar_mat[1][1]**0.5, covar_mat[2][2]**0.5))
# params = ['Tpeak', 'VLSR','sigma','Tpeak_err','VLSR_err','sigma_err']
# Don't accept fit if fitted parameters are non-physical or too uncertain
if (params[0] < 0.01) or (params[3] > 1.0) or (params[2] < 0.05) or (params[5] > 0.5) or (params[4] > 0.75):
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
# Don't accept fit if the SNR for fitted spectrum is less than SNR threshold
#if max(gauss)/rms < snr_min:
# noisy_gauss = nan_array
# gauss = nan_array
# params = nan_array2
except RuntimeError:
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
return i, j, gauss, params, noisy_gauss
# Parallel computation:
nproc = multicore # maximum number of simultaneous processes desired
queue = pprocess.Queue(limit=nproc)
calc = queue.manage(pprocess.MakeParallel(pix_fit))
tic=time.time()
counter = 0
# Uncomment to see some plots of the fitted spectra
#for i,j in zip(x,y):
#pix_fit(i,j)
#plt.plot(spectra_x_axis_kms, spectra, color='blue', drawstyle='steps')
#plt.plot(spectra_x_axis_kms, gauss, color='red')
#plt.show()
#plt.close()
# Begin parallel computations
# Store the best-fit Gaussians and parameters
# in their correct positions in the previously created cubes
for i,j in zip(x,y):
calc(i,j)
for i,j,gauss_spec,parameters,noisy_gauss_spec in queue:
cube_gauss[:,i,j]=gauss_spec
param_cube[:,i,j]=parameters
cube_gauss_noise[:,i,j]=noisy_gauss_spec
counter+=1
print str(counter) + ' of ' + str(pixels) + ' pixels completed \r',
sys.stdout.flush()
print "\n %f s for parallel computation." % (time.time() - tic)
# Save final cubes
# These will be in km/s units.
# Spectra will have larger values to the left, lower values to right
cube_final_gauss = SpectralCube(data=cube_gauss, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss.write(GaussOut, format='fits', overwrite=True)
cube_final_gauss_noise = SpectralCube(data=cube_gauss_noise, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss_noise.write(GaussNoiseOut, format='fits', overwrite=True)
# Construct appropriate header for param_cube
param_header['NAXIS3'] = len(nan_array2)
param_header['WCSAXES'] = 3
param_header['CRPIX3'] = 1
param_header['CDELT3'] = 1
param_header['CRVAL3'] = 0
param_header['PLANE1'] = 'Tpeak'
param_header['PLANE2'] = 'VLSR'
param_header['PLANE3'] = 'sigma'
param_header['PLANE5'] = 'Tpeak_err'
param_header['PLANE6'] = 'VLSR_err'
param_header['PLANE7'] = 'sigma_err'
fits.writeto(ParamOut, param_cube, header=param_header, clobber=True)
### Examples ###
# Fit the HC5N data in Cepheus_L1251, without convolution
#gauss_fitter(region = 'Cepheus_L1251', snr_min = 7.0, mol = 'HC5N', vmin=-6.3, vmax=-2.2, multicore=3)
# Convolve the HC5N data in Cepheus_L1251 to a spatial resolution of 64 arcseconds,
# then fit a Gaussian to all pixels above SNR=3
#gauss_fitter(region = 'Cepheus_L1251', direct = '/Users/jkeown/Desktop/GAS_dendro/', snr_min = 3.0, mol = 'HC5N', peak_channels = [402,460], convolve=64., use_old_conv=False)
|
jakeown/GAS
|
GAS/gauss_fit.py
|
Python
|
mit
| 9,175
|
[
"Gaussian"
] |
aed55e94d82060de7051acfad40d57f87bee4d8ec2161ce45741cb06edb51a66
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import shutil
import unittest
import subprocess
import time
import mooseutils
class TestrPostprocessorReader(unittest.TestCase):
    """
    Test use of PostprocessorReader for loading/reloading csv files.

    The PostprocessorReader is an extension of MooseDataFrame, so only the new functions are tested here.
    """
    def setUp(self):
        """
        Define the test filename.
        """
        # _partial holds the first portion of the same data set as _filename,
        # used by testNewDataReload to simulate a growing csv file.
        self._partial = os.path.abspath('../../test_files/white_elephant_jan_2016_partial.csv')
        self._filename = os.path.abspath('../../test_files/white_elephant_jan_2016.csv')
        self._keys = ['air_temp_low_24_hour_set_1', 'snow_depth_set_1']

    def testBasic(self):
        """
        Test that if a file exists it is loaded w/o error.
        """
        # Test basic read
        data = mooseutils.PostprocessorReader(self._filename)
        self.assertEqual(self._filename, data.filename)
        self.assertTrue(data)

        # Key Testing (__contains__)
        for k in self._keys:
            self.assertTrue(k in data)

        # Check data: row 10 values known from the fixture file
        x = data[self._keys]
        self.assertEqual(x.loc[10][self._keys[0]], 2.12)
        self.assertEqual(x.loc[10][self._keys[1]], 51.00)

    def testCall(self):
        """
        Test that operator() method is working.
        """
        data = mooseutils.PostprocessorReader(self._filename)

        # Single key returns a Series-like object
        x = data[self._keys[0]]
        self.assertEqual(x.loc[10], 2.12)

        # Multiple keys return a frame-like object
        x = data[self._keys]
        self.assertEqual(x.loc[10][self._keys[0]], 2.12)
        self.assertEqual(x.loc[10][self._keys[1]], 51.00)

    def testNewDataReload(self):
        """
        Test that new data is loaded automatically.
        """
        # Copy partial data to a temporary name in the working directory
        tmp = "{}.csv".format(self.__class__.__name__)
        shutil.copyfile(self._partial, tmp)

        # Load data and inspect (partial file has 287 rows, 8 columns)
        data = mooseutils.PostprocessorReader(tmp)
        self.assertEqual(data.data.shape, (287,8))

        # Wait (so the mtime changes) and copy more data
        time.sleep(1)
        shutil.copyfile(self._filename, tmp)
        data.update()
        self.assertEqual(data.data.shape, (742,8))
        os.remove(tmp)

    def testVariables(self):
        """
        Test the the variables names are being read.
        """
        data = mooseutils.PostprocessorReader(self._filename)
        self.assertTrue(data)
        self.assertIn('time', data.variables())
        for k in self._keys:
            self.assertIn(k, data.variables())

    def testRepr(self):
        """
        Test the 'repr' method for writing scripts is working.
        """
        # Load the files
        data = mooseutils.PostprocessorReader(self._filename)
        self.assertTrue(data)

        # Get script text: repr() returns (body lines, import lines)
        output, imports = data.repr()

        # Append testing content so the generated script prints checkable values
        output += ["print('SHAPE:', data.data.shape)"]
        output += ["print('VALUE:', data['snow_depth_set_1'][10])"]

        # Write the test script
        script = '{}_repr.py'.format(self.__class__.__name__)
        with open(script, 'w') as fid:
            fid.write('\n'.join(imports))
            fid.write('\n'.join(output))

        # Run script in a subprocess and verify it reproduces the data
        self.assertTrue(os.path.exists(script))
        out = subprocess.check_output(['python', script])

        # Test for output
        self.assertIn('SHAPE: (742, 8)', out.decode())
        self.assertIn('VALUE: 51', out.decode())

        # Remove the script
        os.remove(script)
# Allow running this test module directly; buffer=True suppresses stdout
# from passing tests.
if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2, buffer=True)
|
nuclear-wizard/moose
|
python/mooseutils/tests/test_PostprocessorReader.py
|
Python
|
lgpl-2.1
| 3,943
|
[
"MOOSE"
] |
d8e29714b5a1bb0751e7724eff5aa9a541b357e234ec9a21b26dc34ae5baeba9
|
# Copyright (C) 2012 Mathias Brodala
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import division
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
import cairo
from collections import namedtuple
import sys
from math import pi
from xl import (
event,
player,
settings
)
from xl.nls import gettext as _
from xl.player.adapters import PlaybackAdapter
from xlgui.widgets import info
import migration
from alphacolor import alphacolor_parse
import osd_preferences
# Module-level singleton window; created in enable() and torn down in disable().
OSDWINDOW = None
def enable(exaile):
    """
    Enables the on screen display plugin
    """
    # Settings migration must run before OSDWindow reads any options.
    migration.migrate_settings()

    global OSDWINDOW
    OSDWINDOW = OSDWindow()
def disable(exaile):
    """
    Disables the on screen display plugin
    """
    # Destroy the singleton and drop the reference so enable() can recreate it.
    global OSDWINDOW
    OSDWINDOW.destroy()
    OSDWINDOW = None
def get_preferences_pane():
    """Return the module providing this plugin's preferences UI."""
    return osd_preferences
# Simple 2D coordinate used for window position and drag tracking.
Point = namedtuple('Point', 'x y')
class OSDWindow(Gtk.Window, PlaybackAdapter):
    """
    A popup window showing information
    of the currently playing track
    """
    autohide = GObject.property(
        type=GObject.TYPE_BOOLEAN,
        nick='autohide',
        blurb='Whether to automatically hide the window after some time',
        default=True,
        flags=GObject.PARAM_READWRITE
    )
    __gsignals__ = {}

    def __init__(self):
        """
        Initializes the window
        """
        Gtk.Window.__init__(self, Gtk.WindowType.TOPLEVEL)

        # for whatever reason, calling set_opacity seems
        # to crash on Windows when using PyGTK that comes with
        # the GStreamer SDK. Since this plugin is enabled by
        # default, just don't fade in/out on windows
        #
        # https://bugs.freedesktop.org/show_bug.cgi?id=54682
        self.use_fade = True
        if sys.platform == 'win32':
            self.use_fade = False

        self.fadeout_id = None     # GLib source id of the running fade-out, if any
        self.drag_origin = None    # Point where a left-button drag started, if any
        self.hide_id = None        # GLib source id of the pending auto-hide, if any

        self.set_type_hint(Gdk.WindowTypeHint.NOTIFICATION)
        self.set_title('Exaile OSD')
        self.set_decorated(False)
        self.set_keep_above(True)
        self.set_skip_pager_hint(True)
        self.set_skip_taskbar_hint(True)
        self.set_resizable(True)
        self.set_app_paintable(True)
        self.stick()
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK)

        # Cached option values
        self.__options = {
            'background': None,
            'display_duration': None,
            'border_radius': None
        }
        self.info_area = info.TrackInfoPane(player.PLAYER)
        self.info_area.set_default_text('')
        self.info_area.set_auto_update(True)
        self.add(self.info_area)

        event.add_callback(self.on_track_tags_changed, 'track_tags_changed')
        event.add_callback(self.on_option_set, 'plugin_osd_option_set')

        # Trigger initial setup trough options
        for option in ('format', 'background', 'display_duration',
                       'show_progress', 'position', 'width', 'height',
                       'border_radius'):
            self.on_option_set('plugin_osd_option_set', settings,
                               'plugin/osd/{option}'.format(option=option))

        # Trigger color map update
        self.emit('screen-changed', self.get_screen())

        PlaybackAdapter.__init__(self, player.PLAYER)

    def destroy(self):
        """
        Cleanups
        """
        event.remove_callback(self.on_option_set, 'plugin_osd_option_set')
        event.remove_callback(self.on_track_tags_changed, 'track_tags_changed')
        Gtk.Window.destroy(self)

    def hide(self):
        """
        Starts fadeout of the window
        """
        if not self.use_fade:
            Gtk.Window.hide(self)
            return

        if self.fadeout_id is None:
            self.fadeout_id = GLib.timeout_add(50, self.__fade_out)

    def show(self):
        """
        Stops fadeout and immediately shows the window
        """
        if self.use_fade:
            try:
                GLib.source_remove(self.fadeout_id)
            except:
                pass
            finally:
                self.fadeout_id = None
            self.set_opacity(1)
        Gtk.Window.show_all(self)

    def __fade_out(self):
        """
        Constantly decreases the opacity to fade out the window
        """
        opacity = self.get_opacity()

        if opacity == 0:
            GLib.source_remove(self.fadeout_id)
            self.fadeout_id = None
            Gtk.Window.hide(self)
            return False

        self.set_opacity(opacity - 0.1)
        return True

    def do_notify(self, parameter):
        """
        Triggers hiding if autohide is enabled
        """
        if parameter.name == 'autohide':
            if self.props.autohide:
                self.hide()

    def do_expose_event(self, event):
        """
        Draws the background of the window

        NOTE(review): 'expose-event' is a GTK2 signal; under GTK3 drawing
        happens via the 'draw' signal — confirm which toolkit version this
        plugin actually runs against.
        """
        context = self.props.window.cairo_create()
        context.rectangle(event.area.x, event.area.y,
                          event.area.width, event.area.height)
        context.clip()

        context.set_source_rgba(
            self.__options['background'].red_float,
            self.__options['background'].green_float,
            self.__options['background'].blue_float,
            self.__options['background'].alpha_float
        )
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.paint()

        Gtk.Window.do_expose_event(self, event)

    def do_screen_changed(self, screen):
        """
        Updates the used colormap
        """
        # Prefer an RGBA visual for translucency; fall back to the system one.
        visual = screen.get_rgba_visual()
        if visual is None:
            visual = screen.get_system_visual()

        self.unrealize()
        self.set_visual(visual)
        self.realize()

    def do_size_allocate(self, allocation):
        """
        Applies the non-rectangular shape

        NOTE(review): Gdk.Pixmap does not exist in GTK3 — this method would
        fail there; confirm the target toolkit version.
        """
        width, height = allocation.width, allocation.height
        mask = Gdk.Pixmap(None, width, height, 1)
        context = mask.cairo_create()

        context.set_source_rgb(0, 0, 0)
        context.set_operator(cairo.OPERATOR_CLEAR)
        context.paint()

        radius = self.__options['border_radius']
        # BUGFIX: these bounds were stored in a plain tuple but accessed via
        # .x/.y/.width/.height attributes, which raised AttributeError;
        # use explicit names for the inner rectangle's corner coordinates.
        left = radius
        top = radius
        right = width - radius
        bottom = height - radius

        context.set_source_rgb(1, 1, 1)
        context.set_operator(cairo.OPERATOR_SOURCE)
        # Top left corner
        context.arc(left, top, radius, 1.0 * pi, 1.5 * pi)
        # Top right corner
        context.arc(right, top, radius, 1.5 * pi, 2.0 * pi)
        # Bottom right corner
        context.arc(right, bottom, radius, 0.0 * pi, 0.5 * pi)
        # Bottom left corner
        context.arc(left, bottom, radius, 0.5 * pi, 1.0 * pi)
        context.fill()

        self.shape_combine_mask(mask, 0, 0)

        Gtk.Window.do_size_allocate(self, allocation)

    def do_configure_event(self, e):
        """
        Stores the window size
        """
        width, height = self.get_size()
        settings.set_option('plugin/osd/width', width)
        settings.set_option('plugin/osd/height', height)

        Gtk.Window.do_configure_event(self, e)

    def do_button_press_event(self, e):
        """
        Starts the dragging process
        """
        if e.button == 1:
            self.drag_origin = Point(e.x, e.y)
            # NOTE(review): GTK2-style attribute; GTK3 exposes this as
            # self.props.window — confirm toolkit version.
            self.window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.FLEUR))
            return True
        elif e.button == 3 and e.state & Gdk.ModifierType.MOD1_MASK:
            self.begin_resize_drag(Gdk.WindowEdge.SOUTH_EAST, 3, int(e.x_root), int(e.y_root), e.time)

    def do_button_release_event(self, e):
        """
        Finishes the dragging process and
        saves the window position
        """
        if e.button == 1:
            settings.set_option('plugin/osd/position', list(self.get_position()))
            self.drag_origin = None
            self.window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.ARROW))
            return True

    def do_motion_notify_event(self, e):
        """
        Moves the window while dragging, makes sure
        the window is always visible upon mouse hover
        """
        drag_origin = self.drag_origin
        if drag_origin is not None:
            position = Point(e.x_root, e.y_root)
            self.move(
                int(position.x - drag_origin.x),
                int(position.y - drag_origin.y)
            )

        # Cancel any pending auto-hide while the pointer is over the window.
        try:
            GLib.source_remove(self.hide_id)
        except:
            pass
        finally:
            self.hide_id = None
        self.show()

    def do_leave_notify_event(self, e):
        """
        Hides the window upon mouse leave
        """
        try:
            GLib.source_remove(self.hide_id)
        except:
            pass
        finally:
            self.hide_id = None
        if self.props.autohide:
            self.hide_id = GLib.timeout_add_seconds(
                self.__options['display_duration'], self.hide)

        Gtk.Window.do_leave_notify_event(self, e)

    def on_track_tags_changed(self, e, track, tag):
        # Refresh the display when a non-internal tag of the current track changes.
        if not tag.startswith('__') and track == player.PLAYER.current:
            self.on_playback_track_start(e, player.PLAYER, track)

    def on_playback_track_start(self, e, player, track):
        """
        Shows the OSD upon track change
        """
        GLib.idle_add(self.show)
        try:
            GLib.source_remove(self.hide_id)
        except:
            pass
        finally:
            self.hide_id = None
        if self.props.autohide:
            self.hide_id = GLib.timeout_add_seconds(
                self.__options['display_duration'], self.hide)

    def on_playback_toggle_pause(self, e, player, track):
        """
        Shows the OSD after resuming playback
        """
        if not player.is_playing(): return
        GLib.idle_add(self.show)
        try:
            GLib.source_remove(self.hide_id)
        except:
            pass
        finally:
            self.hide_id = None
        if self.props.autohide:
            self.hide_id = GLib.timeout_add_seconds(
                self.__options['display_duration'], self.hide)

    def on_playback_player_end(self, e, player, track):
        """
        Hides the OSD upon playback end
        """
        if self.props.autohide:
            self.hide_id = GLib.timeout_add_seconds(
                self.__options['display_duration'], self.hide)

    def on_option_set(self, event, settings, option):
        """
        Updates appearance on setting change
        """
        if option == 'plugin/osd/format':
            self.info_area.set_info_format(settings.get_option(option,
                _('<span font_desc="Sans 11" foreground="#fff"><b>$title</b></span>\n'
                  'by $artist\n'
                  'from $album')
            ))
        if option == 'plugin/osd/background':
            self.__options['background'] = alphacolor_parse(settings.get_option(option, '#333333cc'))
            GLib.idle_add(self.queue_draw)
        elif option == 'plugin/osd/display_duration':
            self.__options['display_duration'] = int(settings.get_option(option, 4))
        elif option == 'plugin/osd/show_progress':
            self.info_area.set_display_progress(settings.get_option(option, True))
        elif option == 'plugin/osd/position':
            position = Point._make(settings.get_option(option, [20, 20]))
            GLib.idle_add(self.move, position.x, position.y)
        elif option == 'plugin/osd/border_radius':
            value = settings.get_option(option, 10)
            self.set_border_width(max(6, int(value / 2)))
            self.__options['border_radius'] = value
            self.emit('size-allocate', self.get_allocation())
|
virtuald/exaile
|
plugins/osd/__init__.py
|
Python
|
gpl-2.0
| 12,754
|
[
"FLEUR"
] |
dba35b2d073c211ac50e2cd63c5184d6acbf594f3017cafa76e4581d68a7aba0
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
# Standard VTK pipeline scaffolding: renderer, window, and interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# NOTE: this shadows the stdlib 'math' module with a vtkMath instance;
# only vtkMath methods are used below, so it is harmless here.
math = vtk.vtkMath()
def MakeColors(lut, n):
    """Fill *lut* with *n* entries: opaque black at index 0 and random
    opaque colors (each channel in [0.2, 1]) at indices 1..n-1."""
    lut.SetNumberOfColors(n)
    lut.SetTableRange(0, n - 1)
    lut.SetScaleToLinear()
    lut.Build()
    # Entry 0 is reserved for the background: opaque black.
    lut.SetTableValue(0, 0, 0, 0, 1)
    # Fixed seed keeps the color table reproducible between runs.
    math.RandomSeed(5071)
    for idx in range(1, n):
        red = math.Random(.2, 1)
        green = math.Random(.2, 1)
        blue = math.Random(.2, 1)
        lut.SetTableValue(idx, red, green, blue, 1)
lut = vtk.vtkLookupTable()
MakeColors(lut, 256)

# Number of random spheres and their radius; each sphere is stamped into
# the label image with a distinct integer label in 1..n.
n = 20
radius = 10

# This has been moved outside the loop so that the code can be correctly
# translated to python
blobImage = vtk.vtkImageData()

i = 0
while i < n:
    sphere = vtk.vtkSphere()
    sphere.SetRadius(radius)
    # Keep each sphere fully inside the [-50, 50] sampling bounds.
    # (Renamed from "max", which shadowed the Python builtin of that name.)
    max_offset = 50 - radius
    sphere.SetCenter(int(math.Random(-max_offset, max_offset)),
                     int(math.Random(-max_offset, max_offset)),
                     int(math.Random(-max_offset, max_offset)))
    sampler = vtk.vtkSampleFunction()
    sampler.SetImplicitFunction(sphere)
    sampler.SetOutputScalarTypeToFloat()
    sampler.SetSampleDimensions(51, 51, 51)
    sampler.SetModelBounds(-50, 50, -50, 50, -50, 50)
    # Threshold the implicit-function samples: voxels inside the sphere
    # (value <= radius^2) receive the label i + 1, everything else 0.
    thres = vtk.vtkImageThreshold()
    thres.SetInputConnection(sampler.GetOutputPort())
    thres.ThresholdByLower(radius * radius)
    thres.ReplaceInOn()
    thres.ReplaceOutOn()
    thres.SetInValue(i + 1)
    thres.SetOutValue(0)
    thres.Update()
    if i == 0:
        blobImage.DeepCopy(thres.GetOutput())
    # Merge the new sphere into the accumulated label image, keeping the
    # larger label wherever spheres overlap.
    maxValue = vtk.vtkImageMathematics()
    maxValue.SetInputData(0, blobImage)
    maxValue.SetInputData(1, thres.GetOutput())
    maxValue.SetOperationToMax()
    maxValue.Modified()
    maxValue.Update()
    blobImage.DeepCopy(maxValue.GetOutput())
    i += 1

# Extract one surface per label value without interpolating across labels.
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(blobImage)
discrete.GenerateValues(n, 1, n)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(discrete.GetOutputPort())
mapper.SetLookupTable(lut)
mapper.SetScalarRange(0, lut.GetNumberOfColors())

actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren1.AddActor(actor)

renWin.Render()
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/General/Testing/Python/TestDiscreteMarchingCubes.py
|
Python
|
gpl-3.0
| 2,317
|
[
"VTK"
] |
44276a71bb21ec1585f666124a6c47c037264211430ee14cca87c7e0d03e6a8b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
import argparse
import bioformats.bed
import bioformats.gff3
import csv
import logging
import os
import vcf
from chromosomer.fragment import AlignmentToMap
from chromosomer.fragment import SeqLengths
from chromosomer.fragment import Map
from chromosomer.fragment import Simulator
from chromosomer.fragment import agp2map
from chromosomer.transfer import BedTransfer
from chromosomer.transfer import Gff3Transfer
from chromosomer.transfer import VcfTransfer
from bioformats.blast import BlastTab
from os.path import splitext
from chromosomer.fragment import logger
def read_fragment_lengths(filename):
    """
    Given a name of a file with fragment lengths, read them to a
    dictionary.

    The file is tab-separated with two columns: fragment name and
    its length in base pairs.

    :param filename: a name of a file with fragment lengths
    :type filename: str
    :return: a dictionary which keys are fragment sequence names and
        values are their lengths
    :rtype: dict
    """
    with open(filename) as length_file:
        rows = csv.reader(length_file, delimiter='\t')
        return {name: int(length) for name, length in rows}
def chromosomer():
    """
    The main function that is run if Chromosomer was launched. It
    defines a command-line parser which processes arguments passed to
    the program and dispatches to the routine implementing the
    requested subcommand.
    """
    parser = argparse.ArgumentParser(
        description='Reference-assisted chromosome assembly tool.')
    subparsers = parser.add_subparsers(dest='command')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s 0.1.4')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='show debugging messages')

    # Parser for the 'chromosomer assemble' part that produces a FASTA
    # file of assembled chromosomes from the specified fragment map.
    assemble_parser = subparsers.add_parser(
        'assemble',
        help='get sequences of assembled chromosomes',
        description='Get the FASTA file of assembled chromosomes.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # required arguments for the 'assemble' routine
    assemble_parser.add_argument('map',
                                 help='a fragment map file')
    assemble_parser.add_argument('fragment_fasta',
                                 help='a FASTA file of fragment '
                                      'sequences to be assembled')
    assemble_parser.add_argument('output_fasta',
                                 help='the output FASTA file of the '
                                      'assembled chromosome sequences')
    # optional arguments for the 'assemble' routine
    assemble_parser.add_argument('-s', '--save_soft_mask',
                                 action='store_true',
                                 help='keep soft masking from the '
                                      'original fragment sequences')

    # Parser for the 'chromosomer fragmentmap' part that
    # produces a map of fragment positions on reference
    # chromosomes from BLAST alignments of the fragments to the
    # chromosomes.
    fragmentmap_parser = subparsers.add_parser(
        'fragmentmap',
        description='Construct a fragment map from fragment '
                    'alignments to reference chromosomes.',
        help='construct a fragment map from alignments',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # required arguments for the 'fragmentmap' routine
    fragmentmap_parser.add_argument(
        'alignment_file',
        help='a BLAST tabular file of fragment alignments to '
             'reference chromosomes'
    )
    fragmentmap_parser.add_argument(
        'gap_size', type=int,
        help='a size of a gap inserted between mapped fragments'
    )
    fragmentmap_parser.add_argument(
        'fragment_lengths',
        help='a file containing lengths of fragment sequences; it can '
             'be obtained using the \'chromosomer fastalength\' tool'
    )
    fragmentmap_parser.add_argument(
        'output_map',
        help='an output fragment map file name'
    )
    # optional arguments for the 'fragmentmap' routine
    fragmentmap_parser.add_argument(
        '-r', '--ratio_threshold', type=float, default=1.2,
        help='the least ratio of two greatest fragment alignment '
             'scores to determine the fragment placed to a reference '
             'genome'
    )
    fragmentmap_parser.add_argument(
        '-s', '--shrink_gaps', action='store_true',
        help='shrink large interfragment gaps to the specified size'
    )

    # Parser for the 'chromosomer fragmentmapstat' part that reports
    # statistics on a fragment map
    fragmentmapstat_parser = subparsers.add_parser(
        'fragmentmapstat',
        description='Show statistics on a fragment map.',
        help='show fragment map statistics'
    )
    # required arguments for the 'fragmentmapstat' routine
    fragmentmapstat_parser.add_argument('map',
                                        help='a fragment map file')
    fragmentmapstat_parser.add_argument('output',
                                        help='an output file of '
                                             'fragment map statistics')

    # Parser for the 'chromosomer fragmentmapbed' part that converts
    # a fragment map to the BED format
    fragmentmapbed_parser = subparsers.add_parser(
        'fragmentmapbed',
        description='Convert a fragment map to the BED format.',
        help='convert a fragment map to the BED format'
    )
    # required arguments for the 'fragmentmapbed' routine
    fragmentmapbed_parser.add_argument('map',
                                       help='a fragment map file')
    fragmentmapbed_parser.add_argument('output',
                                       help='an output BED file '
                                            'representing the '
                                            'fragment map')

    # Parser for the 'chromosomer transfer' part that transfers
    # genome feature annotation from fragments to their assembly
    transfer_parser = subparsers.add_parser(
        'transfer',
        description='Transfer annotated genomic features from '
                    'fragments to their assembly.',
        help='transfer annotated features from fragments to '
             'chromosomes',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # required arguments for the 'transfer' routine
    transfer_parser.add_argument('map',
                                 help='a fragment map file')
    transfer_parser.add_argument('annotation',
                                 help='a file of annotated genome '
                                      'features')
    transfer_parser.add_argument('output',
                                 help='an output file of the '
                                      'transfered annotation')
    # optional arguments for the 'transfer' routine
    transfer_parser.add_argument('-f', '--format', default='bed',
                                 choices=['bed', 'gff3', 'vcf'],
                                 help='the format of a file of '
                                      'annotated features (bed, '
                                      'gff3 or vcf)')

    # Parser for the 'chromosomer fastalength' part that calculates
    # lengths of sequences in the given FASTA file.
    fastalength_parser = subparsers.add_parser(
        'fastalength',
        description='Get lengths of sequences in the specified FASTA '
                    'file (required to build a fragment map).',
        help='get lengths of sequences from a FASTA file',
    )
    # required arguments for the 'fastalength' routine
    fastalength_parser.add_argument('fasta',
                                    help='a FASTA file which sequence '
                                         'lengths are to be obtained')
    fastalength_parser.add_argument('output',
                                    help='an output file of sequence '
                                         'lengths')

    # Parser for the 'chromosomer simulator' routine
    simulator_parser = subparsers.add_parser(
        'simulator',
        description='Simulate fragments and test assembly for '
                    'testing purposes.',
        help='fragment simulator for testing purposes'
    )
    # required arguments for the 'simulator' routine
    simulator_parser.add_argument('fr_num', type=int,
                                  help='the number of '
                                       'chromosome fragments')
    simulator_parser.add_argument('fr_len', type=int,
                                  help='the length of fragments')
    simulator_parser.add_argument('chr_num', type=int,
                                  help='the number of chromosomes')
    simulator_parser.add_argument('output_dir',
                                  help='the directory for output files')
    simulator_parser.add_argument('-g', '--gap_size', type=int,
                                  default=2000,
                                  help='the size of gaps between '
                                       'fragments on a chromosome')
    simulator_parser.add_argument('-p', '--unplaced', type=int,
                                  help='the number of unplaced '
                                       'fragments')
    simulator_parser.add_argument('--prefix', default='',
                                  help='the prefix for output file '
                                       'names')

    # Parser for the 'chromosomer agp2map' routine
    agp2map_parser = subparsers.add_parser(
        'agp2map',
        description='Convert an AGP file to the fragment map format.',
        help='convert an AGP file to a fragment map'
    )
    # required arguments for the 'agp2map' routine
    agp2map_parser.add_argument('agp_file', help='an AGP file')
    agp2map_parser.add_argument('output_file', help='the output '
                                                    'fragment map '
                                                    'file')

    args = parser.parse_args()

    # Configure logging.  The handler level must follow the --debug flag
    # as well: handlers filter records independently of loggers, so a
    # handler pinned at INFO would silently drop the DEBUG records that
    # --debug is supposed to show.
    log_level = logging.DEBUG if args.debug else logging.INFO
    logger.setLevel(log_level)
    logger.propagate = False
    formatter = logging.Formatter('%(asctime)-15s - %(message)s',
                                  '%Y-%m-%d %H:%M:%S')
    ch = logging.StreamHandler()
    ch.setLevel(log_level)
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    logging.basicConfig()
    cli_logger = logging.getLogger(__name__)
    cli_logger.propagate = False
    cli_logger.addHandler(ch)
    cli_logger.setLevel(log_level)

    # Dispatch to the routine implementing the requested subcommand.
    if args.command == 'assemble':
        fragment_map = Map()
        fragment_map.read(args.map)
        fragment_map.assemble(args.fragment_fasta,
                              args.output_fasta,
                              args.save_soft_mask)
    elif args.command == 'fragmentmap':
        fragment_lengths = read_fragment_lengths(args.fragment_lengths)
        map_creator = AlignmentToMap(args.gap_size, fragment_lengths)
        with open(args.alignment_file) as alignment_file:
            alignments = BlastTab(alignment_file)
            fragment_map, unlocalized, unplaced = map_creator.blast(
                alignments, args.ratio_threshold)
        if args.shrink_gaps:
            fragment_map.shrink_gaps(args.gap_size)
        fragment_map.write(args.output_map)
        # write unlocalized and unplaced fragments
        with open(splitext(args.output_map)[0] + '_unlocalized.txt',
                  'w') as unlocalized_file:
            for i in unlocalized:
                unlocalized_file.write('{}\t{}\n'.format(*i))
        with open(splitext(args.output_map)[0] + '_unplaced.txt',
                  'w') as unplaced_file:
            for i in unplaced:
                unplaced_file.write('{}\n'.format(i))
    elif args.command == 'transfer':
        total_count = transferred_count = 0
        if args.format == 'bed':
            transferrer = BedTransfer(args.map)
            with open(args.annotation) as input_file:
                with bioformats.bed.Writer(args.output) as output_file:
                    for feature in bioformats.bed.Reader(
                            input_file).records():
                        total_count += 1
                        transferred_feature = transferrer.feature(
                            feature)
                        if transferred_feature is not None:
                            transferred_count += 1
                            output_file.write(transferred_feature)
        elif args.format == 'gff3':
            transferrer = Gff3Transfer(args.map)
            with open(args.annotation) as input_file:
                with bioformats.gff3.Writer(args.output) as output_file:
                    for feature in bioformats.gff3.Reader(
                            input_file).records():
                        total_count += 1
                        transferred_feature = transferrer.feature(
                            feature)
                        if transferred_feature is not None:
                            transferred_count += 1
                            output_file.write(transferred_feature)
        elif args.format == 'vcf':
            transferrer = VcfTransfer(args.map)
            reader = vcf.Reader(open(args.annotation))
            writer = vcf.Writer(open(args.output, 'w'), reader)
            for variant in reader:
                total_count += 1
                transferred_feature = transferrer.feature(variant)
                if transferred_feature is not None:
                    transferred_count += 1
                    writer.write_record(transferred_feature)
            writer.close()
        logger.info('%d features transferred', transferred_count)
        logger.info('%d features skipped',
                    total_count - transferred_count)
    elif args.command == 'fastalength':
        seq_lengths = SeqLengths(args.fasta)
        with open(args.output, 'wt') as length_file:
            length_writer = csv.writer(length_file, delimiter='\t')
            # items() instead of the Python 2-only iteritems(), which
            # raises AttributeError on Python 3 dictionaries.
            for header, length in seq_lengths.lengths().items():
                length_writer.writerow((header, length, ))
    elif args.command == 'simulator':
        fr_simulator = Simulator(args.fr_len, args.fr_num,
                                 args.chr_num, args.unplaced,
                                 args.gap_size)
        map_file = os.path.join(args.output_dir,
                                args.prefix + 'map.txt')
        chr_file = os.path.join(args.output_dir,
                                args.prefix + 'chromosomes.fa')
        fr_file = os.path.join(args.output_dir, args.prefix +
                               'fragments.fa')
        fr_simulator.write(map_file, fr_file, chr_file)
    elif args.command == 'fragmentmapstat':
        fragment_map = Map()
        fragment_map.read(args.map)
        summary = fragment_map.summary()
        template = '\t'.join(['{}'] * 4) + '\n'
        with open(args.output, 'w') as output_file:
            for chromosome in sorted(summary.keys()):
                output_file.write(template.format(chromosome,
                                                  *summary[chromosome]))
    elif args.command == 'fragmentmapbed':
        fragment_map = Map()
        fragment_map.read(args.map)
        fragment_map.convert2bed(args.output)
    elif args.command == 'agp2map':
        agp2map(args.agp_file, args.output_file)
|
gtamazian/Chromosomer
|
chromosomer/cli.py
|
Python
|
mit
| 15,881
|
[
"BLAST"
] |
850a8373970cfcfe2ade8b1477155b693d8748cf32a78f28db427ce0dced49f4
|
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrapper for the LAMMPS library via ctypes
# for python2/3 compatibility
from __future__ import print_function
import os
import sys
from ctypes import * # lgtm [py/polluting-import]
from os.path import dirname,abspath,join
from inspect import getsourcefile
from .constants import * # lgtm [py/polluting-import]
from .data import * # lgtm [py/polluting-import]
# -------------------------------------------------------------------------
class MPIAbortException(Exception):
    """Exception signalling that LAMMPS hit an error which triggers an MPI abort."""

    def __init__(self, message):
        # Keep the message on the instance; Exception.__init__ is
        # deliberately not called so .args stays empty as in the original.
        self.message = message

    def __str__(self):
        return repr(self.message)
# -------------------------------------------------------------------------
class ExceptionCheck:
    """Context manager that rethrows LAMMPS C++ exceptions as Python exceptions.

    On exit it queries the wrapped LAMMPS instance for a pending error
    and, if one exists, raises the corresponding Python exception.
    """

    def __init__(self, lmp):
        self.lmp = lmp

    def __enter__(self):
        # Nothing to set up; errors are only checked on exit.
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        wrapped = self.lmp
        if wrapped.has_exceptions and wrapped.lib.lammps_has_error(wrapped.lmp):
            raise wrapped._lammps_exception
# -------------------------------------------------------------------------
class lammps(object):
"""Create an instance of the LAMMPS Python class.
.. _mpi4py_docs: https://mpi4py.readthedocs.io/
This is a Python wrapper class that exposes the LAMMPS C-library
interface to Python. It either requires that LAMMPS has been compiled
as shared library which is then dynamically loaded via the ctypes
Python module or that this module called from a Python function that
is called from a Python interpreter embedded into a LAMMPS executable,
for example through the :doc:`python invoke <python>` command.
When the class is instantiated it calls the :cpp:func:`lammps_open`
function of the LAMMPS C-library interface, which in
turn will create an instance of the :cpp:class:`LAMMPS <LAMMPS_NS::LAMMPS>`
C++ class. The handle to this C++ class is stored internally
and automatically passed to the calls to the C library interface.
:param name: "machine" name of the shared LAMMPS library ("mpi" loads ``liblammps_mpi.so``, "" loads ``liblammps.so``)
:type name: string
:param cmdargs: list of command line arguments to be passed to the :cpp:func:`lammps_open` function. The executable name is automatically added.
:type cmdargs: list
:param ptr: pointer to a LAMMPS C++ class instance when called from an embedded Python interpreter. None means load symbols from shared library.
:type ptr: pointer
:param comm: MPI communicator (as provided by `mpi4py <mpi4py_docs_>`_). ``None`` means use ``MPI_COMM_WORLD`` implicitly.
:type comm: MPI_Comm
"""
# -------------------------------------------------------------------------
# create an instance of LAMMPS
def __init__(self,name='',cmdargs=None,ptr=None,comm=None):
    """Load the LAMMPS shared library, declare the ctypes signatures of
    its C API functions, and create (or adopt) a LAMMPS instance.

    See the class docstring for the meaning of the constructor arguments.
    """
    self.comm = comm
    self.opened = 0

    # determine module file location
    modpath = dirname(abspath(getsourcefile(lambda:0)))
    # for windows installers the shared library is in a different folder
    winpath = abspath(os.path.join(modpath,'..','..','bin'))
    self.lib = None
    self.lmp = None

    # if a pointer to a LAMMPS object is handed in
    # when being called from a Python interpreter
    # embedded into a LAMMPS executable, all library
    # symbols should already be available so we do not
    # load a shared object.
    try:
        if ptr: self.lib = CDLL("",RTLD_GLOBAL)
    except OSError:
        self.lib = None

    # load liblammps.so unless name is given
    # if name = "g++", load liblammps_g++.so
    # try loading the LAMMPS shared object from the location
    # of the lammps package with an absolute path,
    # so that LD_LIBRARY_PATH does not need to be set for regular install
    # fall back to loading with a relative path,
    # typically requires LD_LIBRARY_PATH to be set appropriately
    # guess shared library extension based on OS, if not inferred from actual file
    if any([f.startswith('liblammps') and f.endswith('.dylib')
            for f in os.listdir(modpath)]):
        lib_ext = ".dylib"
    elif any([f.startswith('liblammps') and f.endswith('.dll')
              for f in os.listdir(modpath)]):
        lib_ext = ".dll"
    elif os.path.exists(winpath) and any([f.startswith('liblammps') and f.endswith('.dll')
                                          for f in os.listdir(winpath)]):
        lib_ext = ".dll"
        modpath = winpath
    else:
        # no library file found next to the module: fall back to the
        # conventional extension for the current platform
        import platform
        if platform.system() == "Darwin":
            lib_ext = ".dylib"
        elif platform.system() == "Windows":
            lib_ext = ".dll"
        else:
            lib_ext = ".so"

    if not self.lib:
        if name:
            libpath = join(modpath,"liblammps_%s" % name + lib_ext)
        else:
            libpath = join(modpath,"liblammps" + lib_ext)
        if not os.path.isfile(libpath):
            # absolute path not found: retry with a bare name so the
            # dynamic linker searches LD_LIBRARY_PATH (or equivalent)
            if name:
                libpath = "liblammps_%s" % name + lib_ext
            else:
                libpath = "liblammps" + lib_ext
        self.lib = CDLL(libpath,RTLD_GLOBAL)

    # declare all argument and return types for all library methods here.
    # exceptions are where the arguments depend on certain conditions and
    # then are defined where the functions are used.
    self.lib.lammps_extract_setting.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_extract_setting.restype = c_int

    # set default types
    # needed in later declarations
    self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
    self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
    self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))

    self.lib.lammps_open.restype = c_void_p
    self.lib.lammps_open_no_mpi.restype = c_void_p
    self.lib.lammps_close.argtypes = [c_void_p]
    self.lib.lammps_free.argtypes = [c_void_p]

    self.lib.lammps_file.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_file.restype = None

    self.lib.lammps_command.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_command.restype = c_char_p
    self.lib.lammps_commands_list.restype = None
    self.lib.lammps_commands_string.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_commands_string.restype = None

    self.lib.lammps_get_natoms.argtypes = [c_void_p]
    self.lib.lammps_get_natoms.restype = c_double
    self.lib.lammps_extract_box.argtypes = \
        [c_void_p,POINTER(c_double),POINTER(c_double),
         POINTER(c_double),POINTER(c_double),POINTER(c_double),
         POINTER(c_int),POINTER(c_int)]
    self.lib.lammps_extract_box.restype = None

    self.lib.lammps_reset_box.argtypes = \
        [c_void_p,POINTER(c_double),POINTER(c_double),c_double,c_double,c_double]
    self.lib.lammps_reset_box.restype = None

    self.lib.lammps_gather_atoms.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather_atoms.restype = None

    self.lib.lammps_gather_atoms_concat.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather_atoms_concat.restype = None

    self.lib.lammps_gather_atoms_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_gather_atoms_subset.restype = None

    self.lib.lammps_scatter_atoms.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_scatter_atoms.restype = None

    self.lib.lammps_scatter_atoms_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_scatter_atoms_subset.restype = None

    self.lib.lammps_gather_bonds.argtypes = [c_void_p,c_void_p]
    self.lib.lammps_gather_bonds.restype = None

    self.lib.lammps_gather.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather.restype = None

    self.lib.lammps_gather_concat.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather_concat.restype = None

    self.lib.lammps_gather_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_gather_subset.restype = None

    self.lib.lammps_scatter.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_scatter.restype = None

    self.lib.lammps_scatter_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_scatter_subset.restype = None

    self.lib.lammps_find_pair_neighlist.argtypes = [c_void_p, c_char_p, c_int, c_int, c_int]
    self.lib.lammps_find_pair_neighlist.restype = c_int

    self.lib.lammps_find_fix_neighlist.argtypes = [c_void_p, c_char_p, c_int]
    self.lib.lammps_find_fix_neighlist.restype = c_int

    self.lib.lammps_find_compute_neighlist.argtypes = [c_void_p, c_char_p, c_int]
    self.lib.lammps_find_compute_neighlist.restype = c_int

    self.lib.lammps_neighlist_num_elements.argtypes = [c_void_p, c_int]
    self.lib.lammps_neighlist_num_elements.restype = c_int

    self.lib.lammps_neighlist_element_neighbors.argtypes = [c_void_p, c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(POINTER(c_int))]
    self.lib.lammps_neighlist_element_neighbors.restype = None

    self.lib.lammps_is_running.argtypes = [c_void_p]
    self.lib.lammps_is_running.restype = c_int

    self.lib.lammps_force_timeout.argtypes = [c_void_p]

    self.lib.lammps_has_error.argtypes = [c_void_p]
    self.lib.lammps_has_error.restype = c_int

    self.lib.lammps_get_last_error_message.argtypes = [c_void_p, c_char_p, c_int]
    self.lib.lammps_get_last_error_message.restype = c_int

    self.lib.lammps_extract_global.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_extract_global_datatype.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_extract_global_datatype.restype = c_int
    self.lib.lammps_extract_compute.argtypes = [c_void_p, c_char_p, c_int, c_int]

    self.lib.lammps_get_thermo.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_get_thermo.restype = c_double

    self.lib.lammps_encode_image_flags.restype = self.c_imageint

    self.lib.lammps_config_package_name.argtypes = [c_int, c_char_p, c_int]
    self.lib.lammps_config_accelerator.argtypes = [c_char_p, c_char_p, c_char_p]

    self.lib.lammps_set_variable.argtypes = [c_void_p, c_char_p, c_char_p]

    self.lib.lammps_has_style.argtypes = [c_void_p, c_char_p, c_char_p]
    self.lib.lammps_style_count.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_style_name.argtypes = [c_void_p, c_char_p, c_int, c_char_p, c_int]

    self.lib.lammps_has_id.argtypes = [c_void_p, c_char_p, c_char_p]
    self.lib.lammps_id_count.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_id_name.argtypes = [c_void_p, c_char_p, c_int, c_char_p, c_int]

    self.lib.lammps_plugin_count.argtypes = [ ]
    self.lib.lammps_plugin_name.argtypes = [c_int, c_char_p, c_char_p, c_int]

    self.lib.lammps_version.argtypes = [c_void_p]

    self.lib.lammps_get_os_info.argtypes = [c_char_p, c_int]
    self.lib.lammps_get_gpu_device_info.argtypes = [c_char_p, c_int]

    self.lib.lammps_get_mpi_comm.argtypes = [c_void_p]

    self.lib.lammps_decode_image_flags.argtypes = [self.c_imageint, POINTER(c_int*3)]

    self.lib.lammps_extract_atom.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_extract_atom_datatype.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_extract_atom_datatype.restype = c_int

    self.lib.lammps_extract_fix.argtypes = [c_void_p, c_char_p, c_int, c_int, c_int, c_int]

    self.lib.lammps_extract_variable.argtypes = [c_void_p, c_char_p, c_char_p]

    self.lib.lammps_fix_external_get_force.argtypes = [c_void_p, c_char_p]
    self.lib.lammps_fix_external_get_force.restype = POINTER(POINTER(c_double))
    self.lib.lammps_fix_external_set_energy_global.argtypes = [c_void_p, c_char_p, c_double]
    self.lib.lammps_fix_external_set_virial_global.argtypes = [c_void_p, c_char_p, POINTER(c_double)]
    self.lib.lammps_fix_external_set_energy_peratom.argtypes = [c_void_p, c_char_p, POINTER(c_double)]
    self.lib.lammps_fix_external_set_virial_peratom.argtypes = [c_void_p, c_char_p, POINTER(POINTER(c_double))]
    self.lib.lammps_fix_external_set_vector_length.argtypes = [c_void_p, c_char_p, c_int]
    self.lib.lammps_fix_external_set_vector.argtypes = [c_void_p, c_char_p, c_int, c_double]

    # detect if Python is using a version of mpi4py that can pass communicators
    # only needed if LAMMPS has been compiled with MPI support.
    self.has_mpi4py = False
    if self.has_mpi_support:
        try:
            from mpi4py import __version__ as mpi4py_version
            # tested to work with mpi4py versions 2 and 3
            self.has_mpi4py = mpi4py_version.split('.')[0] in ['2','3']
        except ImportError:
            # ignore failing import
            pass

    # if no ptr provided, create an instance of LAMMPS
    # we can pass an MPI communicator from mpi4py v2.0.0 and later
    # no_mpi call lets LAMMPS use MPI_COMM_WORLD
    # cargs = array of C strings from args
    # if ptr, then are embedding Python in LAMMPS input script
    # ptr is the desired instance of LAMMPS
    # just convert it to ctypes ptr and store in self.lmp
    if not ptr:
        # with mpi4py v2+, we can pass MPI communicators to LAMMPS
        # need to adjust for type of MPI communicator object
        # allow for int (like MPICH) or void* (like OpenMPI)
        if self.has_mpi_support and self.has_mpi4py:
            from mpi4py import MPI
            self.MPI = MPI
        if comm:
            if not self.has_mpi_support:
                raise Exception('LAMMPS not compiled with real MPI library')
            if not self.has_mpi4py:
                raise Exception('Python mpi4py version is not 2 or 3')
            if self.MPI._sizeof(self.MPI.Comm) == sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p
            # Detect whether LAMMPS and mpi4py definitely use different MPI libs
            # NOTE(review): comparing a type *size* against the return value of
            # lammps_config_has_mpi_support() looks suspicious — confirm the
            # intended check against the LAMMPS library documentation.
            if sizeof(MPI_Comm) != self.lib.lammps_config_has_mpi_support():
                raise Exception('Inconsistent MPI library in LAMMPS and mpi4py')
            narg = 0
            cargs = None
            if cmdargs:
                # argv[0] is conventionally the program name
                cmdargs.insert(0,"lammps")
                narg = len(cmdargs)
                for i in range(narg):
                    if type(cmdargs[i]) is str:
                        cmdargs[i] = cmdargs[i].encode()
                cargs = (c_char_p*narg)(*cmdargs)
                self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \
                                                 MPI_Comm, c_void_p]
            else:
                self.lib.lammps_open.argtypes = [c_int, c_char_p, \
                                                 MPI_Comm, c_void_p]
            self.opened = 1
            # extract the raw communicator handle from the mpi4py object
            comm_ptr = self.MPI._addressof(comm)
            comm_val = MPI_Comm.from_address(comm_ptr)
            self.lmp = c_void_p(self.lib.lammps_open(narg,cargs,comm_val,None))
        else:
            if self.has_mpi4py and self.has_mpi_support:
                self.comm = self.MPI.COMM_WORLD
            self.opened = 1
            if cmdargs:
                cmdargs.insert(0,"lammps")
                narg = len(cmdargs)
                for i in range(narg):
                    if type(cmdargs[i]) is str:
                        cmdargs[i] = cmdargs[i].encode()
                cargs = (c_char_p*narg)(*cmdargs)
                self.lib.lammps_open_no_mpi.argtypes = [c_int, c_char_p*narg, \
                                                        c_void_p]
                self.lmp = c_void_p(self.lib.lammps_open_no_mpi(narg,cargs,None))
            else:
                self.lib.lammps_open_no_mpi.argtypes = [c_int, c_char_p, c_void_p]
                self.lmp = c_void_p(self.lib.lammps_open_no_mpi(0,None,None))
    else:
        # magic to convert ptr to ctypes ptr
        if sys.version_info >= (3, 0):
            # Python 3 (uses PyCapsule API)
            pythonapi.PyCapsule_GetPointer.restype = c_void_p
            pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
            self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
        else:
            # Python 2 (uses PyCObject API)
            pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
            pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
            self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))

    # optional numpy support (lazy loading)
    self._numpy = None

    self._installed_packages = None
    self._available_styles = None

    # check if liblammps version matches the installed python module version
    # but not for in-place usage, i.e. when the version is 0
    import lammps
    if lammps.__version__ > 0 and lammps.__version__ != self.lib.lammps_version(self.lmp):
        raise(AttributeError("LAMMPS Python module installed for LAMMPS version %d, but shared library is version %d" \
              % (lammps.__version__, self.lib.lammps_version(self.lmp))))

    # add way to insert Python callback for fix external
    self.callback = {}
    self.FIX_EXTERNAL_CALLBACK_FUNC = CFUNCTYPE(None, py_object, self.c_bigint, c_int, POINTER(self.c_tagint), POINTER(POINTER(c_double)), POINTER(POINTER(c_double)))
    self.lib.lammps_set_fix_external_callback.argtypes = [c_void_p, c_char_p, self.FIX_EXTERNAL_CALLBACK_FUNC, py_object]
    self.lib.lammps_set_fix_external_callback.restype = None
# -------------------------------------------------------------------------
# shut-down LAMMPS instance
def __del__(self):
    # Release the underlying LAMMPS instance when the Python object is
    # garbage collected; close() is a no-op if already closed.
    self.close()
# -------------------------------------------------------------------------
# context manager implementation
def __enter__(self):
    # Context-manager entry: no setup needed, hand back the instance itself.
    return self
def __exit__(self, ex_type, ex_value, ex_traceback):
    # Context-manager exit: always close the LAMMPS instance; exceptions
    # are not suppressed (this implicitly returns None).
    self.close()
# -------------------------------------------------------------------------
@property
def numpy(self):
    """ Return object to access numpy versions of API

    It provides alternative implementations of API functions that
    return numpy arrays instead of ctypes pointers. If numpy is not installed,
    accessing this property will lead to an ImportError.

    :return: instance of numpy wrapper object
    :rtype: numpy_wrapper
    """
    wrapper = self._numpy
    if not wrapper:
        # lazy import: numpy is only required once this property is used
        from .numpy_wrapper import numpy_wrapper
        wrapper = self._numpy = numpy_wrapper(self)
    return wrapper
# -------------------------------------------------------------------------
def close(self):
    """Explicitly delete a LAMMPS instance through the C-library interface.

    This is a wrapper around the :cpp:func:`lammps_close` function of the C-library interface.
    """
    # Only call into the library when we actually own an open instance.
    if self.opened and self.lmp:
        self.lib.lammps_close(self.lmp)
    # Reset state unconditionally so repeated close() calls are safe.
    self.lmp = None
    self.opened = 0
# -------------------------------------------------------------------------
def finalize(self):
    """Shut down the MPI communication and Kokkos environment (if active) through the
    library interface by calling :cpp:func:`lammps_mpi_finalize` and
    :cpp:func:`lammps_kokkos_finalize`.

    You cannot create or use any LAMMPS instances after this function is called
    unless LAMMPS was compiled without MPI and without Kokkos support.
    """
    # Destroy this instance first, then shut down the runtimes (Kokkos
    # before MPI, matching the order the library expects).
    self.close()
    self.lib.lammps_kokkos_finalize()
    self.lib.lammps_mpi_finalize()
# -------------------------------------------------------------------------
def version(self):
    """Return a numerical representation of the LAMMPS version in use.

    This is a wrapper around the :cpp:func:`lammps_version` function of the C-library interface.

    :return: version number (presumably a date-encoded integer —
        confirm the exact encoding in the LAMMPS documentation)
    :rtype: int
    """
    return self.lib.lammps_version(self.lmp)
# -------------------------------------------------------------------------
def get_os_info(self):
    """Return a string with information about the OS and compiler runtime

    This is a wrapper around the :cpp:func:`lammps_get_os_info` function of the C-library interface.

    :return: OS info string
    :rtype: string
    """
    # The C function fills a caller-provided, fixed-size buffer.
    bufsize = 512
    buf = create_string_buffer(bufsize)
    self.lib.lammps_get_os_info(buf, bufsize)
    return buf.value.decode()
# -------------------------------------------------------------------------
def get_mpi_comm(self):
"""Get the MPI communicator in use by the current LAMMPS instance
This is a wrapper around the :cpp:func:`lammps_get_mpi_comm` function
of the C-library interface. It will return ``None`` if either the
LAMMPS library was compiled without MPI support or the mpi4py
Python module is not available.
:return: MPI communicator
:rtype: MPI_Comm
"""
if self.has_mpi4py and self.has_mpi_support:
from mpi4py import MPI
f_comm = self.lib.lammps_get_mpi_comm(self.lmp)
c_comm = MPI.Comm.f2py(f_comm)
return c_comm
else:
return None
# -------------------------------------------------------------------------
@property
def _lammps_exception(self):
sb = create_string_buffer(100)
error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100)
error_msg = sb.value.decode().strip()
if error_type == 2:
return MPIAbortException(error_msg)
return Exception(error_msg)
# -------------------------------------------------------------------------
def file(self, path):
"""Read LAMMPS commands from a file.
This is a wrapper around the :cpp:func:`lammps_file` function of the C-library interface.
It will open the file with the name/path `file` and process the LAMMPS commands line by line until
the end. The function will return when the end of the file is reached.
:param path: Name of the file/path with LAMMPS commands
:type path: string
"""
if path: path = path.encode()
else: return
with ExceptionCheck(self):
self.lib.lammps_file(self.lmp, path)
# -------------------------------------------------------------------------
def command(self,cmd):
"""Process a single LAMMPS input command from a string.
This is a wrapper around the :cpp:func:`lammps_command`
function of the C-library interface.
:param cmd: a single lammps command
:type cmd: string
"""
if cmd: cmd = cmd.encode()
else: return
with ExceptionCheck(self):
self.lib.lammps_command(self.lmp,cmd)
# -------------------------------------------------------------------------
def commands_list(self,cmdlist):
"""Process multiple LAMMPS input commands from a list of strings.
This is a wrapper around the
:cpp:func:`lammps_commands_list` function of
the C-library interface.
:param cmdlist: a single lammps command
:type cmdlist: list of strings
"""
cmds = [x.encode() for x in cmdlist if type(x) is str]
narg = len(cmdlist)
args = (c_char_p * narg)(*cmds)
self.lib.lammps_commands_list.argtypes = [c_void_p, c_int, c_char_p * narg]
with ExceptionCheck(self):
self.lib.lammps_commands_list(self.lmp,narg,args)
# -------------------------------------------------------------------------
def commands_string(self,multicmd):
"""Process a block of LAMMPS input commands from a string.
This is a wrapper around the
:cpp:func:`lammps_commands_string`
function of the C-library interface.
:param multicmd: text block of lammps commands
:type multicmd: string
"""
if type(multicmd) is str: multicmd = multicmd.encode()
with ExceptionCheck(self):
self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd))
# -------------------------------------------------------------------------
def get_natoms(self):
"""Get the total number of atoms in the LAMMPS instance.
Will be precise up to 53-bit signed integer due to the
underlying :cpp:func:`lammps_get_natoms` function returning a double.
:return: number of atoms
:rtype: int
"""
return int(self.lib.lammps_get_natoms(self.lmp))
# -------------------------------------------------------------------------
def extract_box(self):
"""Extract simulation box parameters
This is a wrapper around the :cpp:func:`lammps_extract_box` function
of the C-library interface. Unlike in the C function, the result is
returned as a list.
:return: list of the extracted data: boxlo, boxhi, xy, yz, xz, periodicity, box_change
:rtype: [ 3*double, 3*double, double, double, 3*int, int]
"""
boxlo = (3*c_double)()
boxhi = (3*c_double)()
xy = c_double()
yz = c_double()
xz = c_double()
periodicity = (3*c_int)()
box_change = c_int()
with ExceptionCheck(self):
self.lib.lammps_extract_box(self.lmp,boxlo,boxhi,
byref(xy),byref(yz),byref(xz),
periodicity,byref(box_change))
boxlo = boxlo[:3]
boxhi = boxhi[:3]
xy = xy.value
yz = yz.value
xz = xz.value
periodicity = periodicity[:3]
box_change = box_change.value
return boxlo,boxhi,xy,yz,xz,periodicity,box_change
# -------------------------------------------------------------------------
def reset_box(self,boxlo,boxhi,xy,yz,xz):
"""Reset simulation box parameters
This is a wrapper around the :cpp:func:`lammps_reset_box` function
of the C-library interface.
:param boxlo: new lower box boundaries
:type boxlo: list of 3 floating point numbers
:param boxhi: new upper box boundaries
:type boxhi: list of 3 floating point numbers
:param xy: xy tilt factor
:type xy: float
:param yz: yz tilt factor
:type yz: float
:param xz: xz tilt factor
:type xz: float
"""
cboxlo = (3*c_double)(*boxlo)
cboxhi = (3*c_double)(*boxhi)
with ExceptionCheck(self):
self.lib.lammps_reset_box(self.lmp,cboxlo,cboxhi,xy,yz,xz)
# -------------------------------------------------------------------------
def get_thermo(self,name):
"""Get current value of a thermo keyword
This is a wrapper around the :cpp:func:`lammps_get_thermo`
function of the C-library interface.
:param name: name of thermo keyword
:type name: string
:return: value of thermo keyword
:rtype: double or None
"""
if name: name = name.encode()
else: return None
with ExceptionCheck(self):
return self.lib.lammps_get_thermo(self.lmp,name)
# -------------------------------------------------------------------------
def extract_setting(self, name):
"""Query LAMMPS about global settings that can be expressed as an integer.
This is a wrapper around the :cpp:func:`lammps_extract_setting`
function of the C-library interface. Its documentation includes
a list of the supported keywords.
:param name: name of the setting
:type name: string
:return: value of the setting
:rtype: int
"""
if name: name = name.encode()
else: return None
return int(self.lib.lammps_extract_setting(self.lmp,name))
# -------------------------------------------------------------------------
# extract global info datatype
def extract_global_datatype(self, name):
"""Retrieve global property datatype from LAMMPS
This is a wrapper around the :cpp:func:`lammps_extract_global_datatype`
function of the C-library interface. Its documentation includes a
list of the supported keywords.
This function returns ``None`` if the keyword is not
recognized. Otherwise it will return a positive integer value that
corresponds to one of the :ref:`data type <py_datatype_constants>`
constants define in the :py:mod:`lammps` module.
:param name: name of the property
:type name: string
:return: data type of global property, see :ref:`py_datatype_constants`
:rtype: int
"""
if name: name = name.encode()
else: return None
return self.lib.lammps_extract_global_datatype(self.lmp, name)
# -------------------------------------------------------------------------
# extract global info
  def extract_global(self, name, dtype=LAMMPS_AUTODETECT):
    """Query LAMMPS about global settings of different types.

    This is a wrapper around the :cpp:func:`lammps_extract_global` function
    of the C-library interface. Since there are no pointers in Python, this
    method will - unlike the C function - return the value or a list of
    values. The :cpp:func:`lammps_extract_global` documentation includes a
    list of the supported keywords and their data types.
    Since Python needs to know the data type to be able to interpret
    the result, by default, this function will try to auto-detect the data type
    by asking the library. You can also force a specific data type. For that
    purpose the :py:mod:`lammps` module contains :ref:`data type <py_datatype_constants>`
    constants. This function returns ``None`` if either the keyword is not recognized,
    or an invalid data type constant is used.

    :param name: name of the property
    :type name: string
    :param dtype: data type of the returned data (see :ref:`py_datatype_constants`)
    :type dtype: int, optional
    :return: value of the property or list of values or None
    :rtype: int, float, list, or NoneType
    """
    if dtype == LAMMPS_AUTODETECT:
      dtype = self.extract_global_datatype(name)
    # set length of vector for items that are not a scalar
    vec_dict = { 'boxlo':3, 'boxhi':3, 'sublo':3, 'subhi':3,
                 'sublo_lambda':3, 'subhi_lambda':3, 'periodicity':3 }
    if name in vec_dict:
      veclen = vec_dict[name]
    elif name == 'respa_dt':
      # one entry per rRESPA level: recursively query the level count
      veclen = self.extract_global('respa_levels',LAMMPS_INT)
    else:
      veclen = 1
    # NOTE: encode only here -- the dict lookup and comparison above need
    # the original str form of the name
    if name: name = name.encode()
    else: return None
    # pick the ctypes return type for the library call; target_type later
    # converts each extracted element to a native Python value
    if dtype == LAMMPS_INT:
      self.lib.lammps_extract_global.restype = POINTER(c_int32)
      target_type = int
    elif dtype == LAMMPS_INT64:
      self.lib.lammps_extract_global.restype = POINTER(c_int64)
      target_type = int
    elif dtype == LAMMPS_DOUBLE:
      self.lib.lammps_extract_global.restype = POINTER(c_double)
      target_type = float
    elif dtype == LAMMPS_STRING:
      self.lib.lammps_extract_global.restype = c_char_p
      target_type = str
    else:
      # unrecognized data type constant; restype is left unchanged
      target_type = None
    ptr = self.lib.lammps_extract_global(self.lmp, name)
    if ptr:
      if dtype == LAMMPS_STRING:
        # c_char_p restype already yields bytes, not a pointer
        return ptr.decode('utf-8')
      if veclen > 1:
        result = []
        for i in range(0,veclen):
          result.append(target_type(ptr[i]))
        return result
      else: return target_type(ptr[0])
    return None
# -------------------------------------------------------------------------
# extract per-atom info datatype
def extract_atom_datatype(self, name):
"""Retrieve per-atom property datatype from LAMMPS
This is a wrapper around the :cpp:func:`lammps_extract_atom_datatype`
function of the C-library interface. Its documentation includes a
list of the supported keywords.
This function returns ``None`` if the keyword is not
recognized. Otherwise it will return an integer value that
corresponds to one of the :ref:`data type <py_datatype_constants>` constants
defined in the :py:mod:`lammps` module.
:param name: name of the property
:type name: string
:return: data type of per-atom property (see :ref:`py_datatype_constants`)
:rtype: int
"""
if name: name = name.encode()
else: return None
return self.lib.lammps_extract_atom_datatype(self.lmp, name)
# -------------------------------------------------------------------------
# extract per-atom info
def extract_atom(self, name, dtype=LAMMPS_AUTODETECT):
"""Retrieve per-atom properties from LAMMPS
This is a wrapper around the :cpp:func:`lammps_extract_atom`
function of the C-library interface. Its documentation includes a
list of the supported keywords and their data types.
Since Python needs to know the data type to be able to interpret
the result, by default, this function will try to auto-detect the data type
by asking the library. You can also force a specific data type by setting ``dtype``
to one of the :ref:`data type <py_datatype_constants>` constants defined in the
:py:mod:`lammps` module.
This function returns ``None`` if either the keyword is not
recognized, or an invalid data type constant is used.
.. note::
While the returned arrays of per-atom data are dimensioned
for the range [0:nmax] - as is the underlying storage -
the data is usually only valid for the range of [0:nlocal],
unless the property of interest is also updated for ghost
atoms. In some cases, this depends on a LAMMPS setting, see
for example :doc:`comm_modify vel yes <comm_modify>`.
:param name: name of the property
:type name: string
:param dtype: data type of the returned data (see :ref:`py_datatype_constants`)
:type dtype: int, optional
:return: requested data or ``None``
:rtype: ctypes.POINTER(ctypes.c_int32), ctypes.POINTER(ctypes.POINTER(ctypes.c_int32)),
ctypes.POINTER(ctypes.c_int64), ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
or NoneType
"""
if dtype == LAMMPS_AUTODETECT:
dtype = self.extract_atom_datatype(name)
if name: name = name.encode()
else: return None
if dtype == LAMMPS_INT:
self.lib.lammps_extract_atom.restype = POINTER(c_int32)
elif dtype == LAMMPS_INT_2D:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int32))
elif dtype == LAMMPS_DOUBLE:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif dtype == LAMMPS_DOUBLE_2D:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
elif dtype == LAMMPS_INT64:
self.lib.lammps_extract_atom.restype = POINTER(c_int64)
elif dtype == LAMMPS_INT64_2D:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int64))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp, name)
if ptr: return ptr
else: return None
# -------------------------------------------------------------------------
  def extract_compute(self,cid,cstyle,ctype):
    """Retrieve data from a LAMMPS compute

    This is a wrapper around the :cpp:func:`lammps_extract_compute`
    function of the C-library interface.
    This function returns ``None`` if either the compute id is not
    recognized, or an invalid combination of :ref:`cstyle <py_style_constants>`
    and :ref:`ctype <py_type_constants>` constants is used. The
    names and functionality of the constants are the same as for
    the corresponding C-library function. For requests to return
    a scalar or a size, the value is returned, otherwise a pointer.

    :param cid: compute ID
    :type cid: string
    :param cstyle: style of the data retrieve (global, atom, or local), see :ref:`py_style_constants`
    :type cstyle: int
    :param ctype: type or size of the returned data (scalar, vector, or array), see :ref:`py_type_constants`
    :type ctype: int
    :return: requested data as scalar, pointer to 1d or 2d double array, or None
    :rtype: c_double, ctypes.POINTER(c_double), ctypes.POINTER(ctypes.POINTER(c_double)), or NoneType
    """
    if cid: cid = cid.encode()
    else: return None

    if ctype == LMP_TYPE_SCALAR:
      if cstyle == LMP_STYLE_GLOBAL:
        # global scalar: dereference the pointer and return the value
        self.lib.lammps_extract_compute.restype = POINTER(c_double)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
        return ptr[0]
      elif cstyle == LMP_STYLE_ATOM:
        # per-atom computes do not produce a scalar
        return None
      elif cstyle == LMP_STYLE_LOCAL:
        # NOTE(review): local scalar requests are read as an int here,
        # unlike the global double -- confirm against library.cpp
        self.lib.lammps_extract_compute.restype = POINTER(c_int)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
        return ptr[0]
    elif ctype == LMP_TYPE_VECTOR:
      # vectors are returned as raw double pointers for any style
      self.lib.lammps_extract_compute.restype = POINTER(c_double)
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
      return ptr
    elif ctype == LMP_TYPE_ARRAY:
      # arrays are returned as double** for any style
      self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
      return ptr
    elif ctype == LMP_SIZE_COLS:
      # column counts are defined for all three styles; returned by value
      if cstyle == LMP_STYLE_GLOBAL \
         or cstyle == LMP_STYLE_ATOM \
         or cstyle == LMP_STYLE_LOCAL:
        self.lib.lammps_extract_compute.restype = POINTER(c_int)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
        return ptr[0]
    elif ctype == LMP_SIZE_VECTOR or ctype == LMP_SIZE_ROWS:
      # vector length / row count only exists for global and local data
      if cstyle == LMP_STYLE_GLOBAL \
         or cstyle == LMP_STYLE_LOCAL:
        self.lib.lammps_extract_compute.restype = POINTER(c_int)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_compute(self.lmp,cid,cstyle,ctype)
        return ptr[0]
    # any unsupported style/type combination falls through to None
    return None
# -------------------------------------------------------------------------
# extract fix info
# in case of global data, free memory for 1 double via lammps_free()
# double was allocated by library interface function
  def extract_fix(self,fid,fstyle,ftype,nrow=0,ncol=0):
    """Retrieve data from a LAMMPS fix

    This is a wrapper around the :cpp:func:`lammps_extract_fix`
    function of the C-library interface.
    This function returns ``None`` if either the fix id is not
    recognized, or an invalid combination of :ref:`fstyle <py_style_constants>`
    and :ref:`ftype <py_type_constants>` constants is used. The
    names and functionality of the constants are the same as for
    the corresponding C-library function. For requests to return
    a scalar or a size, the value is returned, also when accessing
    global vectors or arrays, otherwise a pointer.

    :param fid: fix ID
    :type fid: string
    :param fstyle: style of the data retrieve (global, atom, or local), see :ref:`py_style_constants`
    :type fstyle: int
    :param ftype: type or size of the returned data (scalar, vector, or array), see :ref:`py_type_constants`
    :type ftype: int
    :param nrow: index of global vector element or row index of global array element
    :type nrow: int
    :param ncol: column index of global array element
    :type ncol: int
    :return: requested data or None
    :rtype: c_double, ctypes.POINTER(c_double), ctypes.POINTER(ctypes.POINTER(c_double)), or NoneType
    """
    if fid: fid = fid.encode()
    else: return None

    if fstyle == LMP_STYLE_GLOBAL:
      if ftype in (LMP_TYPE_SCALAR, LMP_TYPE_VECTOR, LMP_TYPE_ARRAY):
        # global data is copied into a freshly malloc'ed double by the
        # library: dereference it, then release it with lammps_free()
        self.lib.lammps_extract_fix.restype = POINTER(c_double)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_fix(self.lmp,fid,fstyle,ftype,nrow,ncol)
        result = ptr[0]
        self.lib.lammps_free(ptr)
        return result
      elif ftype in (LMP_SIZE_VECTOR, LMP_SIZE_ROWS, LMP_SIZE_COLS):
        # sizes point into library-owned storage; no free needed
        self.lib.lammps_extract_fix.restype = POINTER(c_int)
        with ExceptionCheck(self):
          ptr = self.lib.lammps_extract_fix(self.lmp,fid,fstyle,ftype,nrow,ncol)
        return ptr[0]
      else:
        return None

    elif fstyle == LMP_STYLE_ATOM:
      if ftype == LMP_TYPE_VECTOR:
        self.lib.lammps_extract_fix.restype = POINTER(c_double)
      elif ftype == LMP_TYPE_ARRAY:
        self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
      elif ftype == LMP_SIZE_COLS:
        self.lib.lammps_extract_fix.restype = POINTER(c_int)
      else:
        return None
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_fix(self.lmp,fid,fstyle,ftype,nrow,ncol)
      # only the column count is returned by value; data stays a pointer
      if ftype == LMP_SIZE_COLS:
        return ptr[0]
      else:
        return ptr

    elif fstyle == LMP_STYLE_LOCAL:
      if ftype == LMP_TYPE_VECTOR:
        self.lib.lammps_extract_fix.restype = POINTER(c_double)
      elif ftype == LMP_TYPE_ARRAY:
        self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
      elif ftype in (LMP_TYPE_SCALAR, LMP_SIZE_VECTOR, LMP_SIZE_ROWS, LMP_SIZE_COLS):
        self.lib.lammps_extract_fix.restype = POINTER(c_int)
      else:
        return None
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_fix(self.lmp,fid,fstyle,ftype,nrow,ncol)
      # vectors/arrays are returned as pointers, everything else by value
      if ftype in (LMP_TYPE_VECTOR, LMP_TYPE_ARRAY):
        return ptr
      else:
        return ptr[0]
    else:
      return None
# -------------------------------------------------------------------------
# extract variable info
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
  def extract_variable(self, name, group=None, vartype=LMP_VAR_EQUAL):
    """ Evaluate a LAMMPS variable and return its data

    This function is a wrapper around the function
    :cpp:func:`lammps_extract_variable` of the C-library interface,
    evaluates variable name and returns a copy of the computed data.
    The memory temporarily allocated by the C-interface is deleted
    after the data is copied to a Python variable or list.
    The variable must be either an equal-style (or equivalent)
    variable or an atom-style variable. The variable type has to
    provided as ``vartype`` parameter which may be one of two constants:
    ``LMP_VAR_EQUAL`` or ``LMP_VAR_ATOM``; it defaults to
    equal-style variables.
    The group parameter is only used for atom-style variables and
    defaults to the group "all" if set to ``None``, which is the default.

    :param name: name of the variable to execute
    :type name: string
    :param group: name of group for atom-style variable
    :type group: string, only for atom-style variables
    :param vartype: type of variable, see :ref:`py_vartype_constants`
    :type vartype: int
    :return: the requested data
    :rtype: c_double, (c_double), or NoneType
    """
    if name: name = name.encode()
    else: return None
    if group: group = group.encode()
    if vartype == LMP_VAR_EQUAL:
      # library returns a malloc'ed double: copy the value, then free it
      self.lib.lammps_extract_variable.restype = POINTER(c_double)
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
      if ptr: result = ptr[0]
      else: return None
      self.lib.lammps_free(ptr)
      return result
    elif vartype == LMP_VAR_ATOM:
      # copy the nlocal per-atom values into a Python-owned ctypes array
      # before freeing the library-allocated vector
      nlocal = self.extract_global("nlocal")
      result = (c_double*nlocal)()
      self.lib.lammps_extract_variable.restype = POINTER(c_double)
      with ExceptionCheck(self):
        ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
      if ptr:
        for i in range(nlocal): result[i] = ptr[i]
        self.lib.lammps_free(ptr)
      else: return None
      return result
    # unknown variable type constant
    return None
# -------------------------------------------------------------------------
def set_variable(self,name,value):
"""Set a new value for a LAMMPS string style variable
This is a wrapper around the :cpp:func:`lammps_set_variable`
function of the C-library interface.
:param name: name of the variable
:type name: string
:param value: new variable value
:type value: any. will be converted to a string
:return: either 0 on success or -1 on failure
:rtype: int
"""
if name: name = name.encode()
else: return -1
if value: value = str(value).encode()
else: return -1
with ExceptionCheck(self):
return self.lib.lammps_set_variable(self.lmp,name,value)
# -------------------------------------------------------------------------
# return vector of atom properties gathered across procs
# 3 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# dtype = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # returned data is a 1d vector - doc how it is ordered?
  # NOTE: need to ensure we are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def gather_atoms(self,name,dtype,count):
if name: name = name.encode()
natoms = self.get_natoms()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,dtype,count,data)
elif dtype == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,dtype,count,data)
else:
return None
return data
# -------------------------------------------------------------------------
def gather_atoms_concat(self,name,dtype,count):
if name: name = name.encode()
natoms = self.get_natoms()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,dtype,count,data)
elif dtype == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,dtype,count,data)
else:
return None
return data
def gather_atoms_subset(self,name,dtype,count,ndata,ids):
if name: name = name.encode()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*ndata)*c_int)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,dtype,count,ndata,ids,data)
elif dtype == 1:
data = ((count*ndata)*c_double)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,dtype,count,ndata,ids,data)
else:
return None
return data
# -------------------------------------------------------------------------
# scatter vector of atom properties across procs
# 2 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # assume data is of correct type and length, as created by gather_atoms()
  # NOTE: need to ensure we are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def scatter_atoms(self,name,dtype,count,data):
if name: name = name.encode()
with ExceptionCheck(self):
self.lib.lammps_scatter_atoms(self.lmp,name,dtype,count,data)
# -------------------------------------------------------------------------
def scatter_atoms_subset(self,name,dtype,count,ndata,ids,data):
if name: name = name.encode()
with ExceptionCheck(self):
self.lib.lammps_scatter_atoms_subset(self.lmp,name,dtype,count,ndata,ids,data)
# -------------------------------------------------------------------------
def gather_bonds(self):
"""Retrieve global list of bonds
This is a wrapper around the :cpp:func:`lammps_gather_bonds`
function of the C-library interface.
This function returns a tuple with the number of bonds and a
flat list of ctypes integer values with the bond type, bond atom1,
bond atom2 for each bond.
.. versionadded:: 28Jul2021
:return: a tuple with the number of bonds and a list of c_int or c_long
:rtype: (int, 3*nbonds*c_tagint)
"""
nbonds = self.extract_global("nbonds")
with ExceptionCheck(self):
data = ((3*nbonds)*self.c_tagint)()
self.lib.lammps_gather_bonds(self.lmp,data)
return nbonds,data
# -------------------------------------------------------------------------
# return vector of atom/compute/fix properties gathered across procs
# 3 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # returned data is a 1d vector - doc how it is ordered?
  # NOTE: need to ensure we are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def gather(self,name,dtype,count):
if name: name = name.encode()
natoms = self.get_natoms()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather(self.lmp,name,dtype,count,data)
elif dtype == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather(self.lmp,name,dtype,count,data)
else:
return None
return data
def gather_concat(self,name,dtype,count):
if name: name = name.encode()
natoms = self.get_natoms()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_concat(self.lmp,name,dtype,count,data)
elif dtype == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_concat(self.lmp,name,dtype,count,data)
else:
return None
return data
def gather_subset(self,name,dtype,count,ndata,ids):
if name: name = name.encode()
with ExceptionCheck(self):
if dtype == 0:
data = ((count*ndata)*c_int)()
self.lib.lammps_gather_subset(self.lmp,name,dtype,count,ndata,ids,data)
elif dtype == 1:
data = ((count*ndata)*c_double)()
self.lib.lammps_gather_subset(self.lmp,name,dtype,count,ndata,ids,data)
else:
return None
return data
# scatter vector of atom/compute/fix properties across procs
# 2 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # assume data is of correct type and length, as created by gather_atoms()
  # NOTE: need to ensure we are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def scatter(self,name,dtype,count,data):
if name: name = name.encode()
with ExceptionCheck(self):
self.lib.lammps_scatter(self.lmp,name,dtype,count,data)
def scatter_subset(self,name,dtype,count,ndata,ids,data):
if name: name = name.encode()
with ExceptionCheck(self):
self.lib.lammps_scatter_subset(self.lmp,name,dtype,count,ndata,ids,data)
# -------------------------------------------------------------------------
def encode_image_flags(self,ix,iy,iz):
""" convert 3 integers with image flags for x-, y-, and z-direction
into a single integer like it is used internally in LAMMPS
This method is a wrapper around the :cpp:func:`lammps_encode_image_flags`
function of library interface.
:param ix: x-direction image flag
:type ix: int
:param iy: y-direction image flag
:type iy: int
:param iz: z-direction image flag
:type iz: int
:return: encoded image flags
:rtype: lammps.c_imageint
"""
return self.lib.lammps_encode_image_flags(ix,iy,iz)
# -------------------------------------------------------------------------
def decode_image_flags(self,image):
""" Convert encoded image flag integer into list of three regular integers.
This method is a wrapper around the :cpp:func:`lammps_decode_image_flags`
function of library interface.
:param image: encoded image flags
:type image: lammps.c_imageint
:return: list of three image flags in x-, y-, and z- direction
:rtype: list of 3 int
"""
flags = (c_int*3)()
self.lib.lammps_decode_image_flags(image,byref(flags))
return [int(i) for i in flags]
# -------------------------------------------------------------------------
# create N atoms on all procs
# N = global number of atoms
# id = ID of each atom (optional, can be None)
# type = type of each atom (1 to Ntypes) (required)
# x = coords of each atom as (N,3) array (required)
# v = velocity of each atom as (N,3) array (optional, can be None)
  # NOTE: how could we ensure we are passing the correct type to LAMMPS
# e.g. for Python list or NumPy, etc
# ditto for gather_atoms() above
  def create_atoms(self,n,id,type,x,v=None,image=None,shrinkexceed=False):
    """
    Create N atoms from list of coordinates and properties

    This function is a wrapper around the :cpp:func:`lammps_create_atoms`
    function of the C-library interface, and the behavior is similar except
    that the *v*, *image*, and *shrinkexceed* arguments are optional and
    default to *None*, *None*, and *False*, respectively. With none being
    equivalent to a ``NULL`` pointer in C.
    The lists of coordinates, types, atom IDs, velocities, image flags can
    be provided in any format that may be converted into the required
    internal data types. Also the list may contain more than *N* entries,
    but not fewer. In the latter case, the function will return without
    attempting to create atoms. You may use the :py:func:`encode_image_flags
    <lammps.encode_image_flags>` method to properly combine three integers
    with image flags into a single integer.

    :param n: number of atoms for which data is provided
    :type n: int
    :param id: list of atom IDs with at least n elements or None
    :type id: list of lammps.tagint
    :param type: list of atom types
    :type type: list of int
    :param x: list of coordinates for x-, y-, and z (flat list of 3n entries)
    :type x: list of float
    :param v: list of velocities for x-, y-, and z (flat list of 3n entries) or None (optional)
    :type v: list of float
    :param image: list of encoded image flags (optional)
    :type image: list of lammps.imageint
    :param shrinkexceed: whether to expand shrink-wrap boundaries if atoms are outside the box (optional)
    :type shrinkexceed: bool
    :return: number of atoms created. 0 if insufficient or invalid data
    :rtype: int
    """
    # NOTE(review): the truthiness tests below ("if id:", "if v:", ...)
    # raise for numpy arrays and treat empty lists like None -- confirm
    # whether numpy input is expected here.
    if id:
      # copy IDs into a ctypes array; a short input list raises ValueError
      id_lmp = (self.c_tagint*n)()
      try:
        id_lmp[:] = id[0:n]
      except ValueError:
        return 0
    else:
      id_lmp = None

    # atom types are mandatory
    type_lmp = (c_int*n)()
    try:
      type_lmp[:] = type[0:n]
    except ValueError:
      return 0

    # coordinates: flat list of 3 doubles per atom
    three_n = 3*n
    x_lmp = (c_double*three_n)()
    try:
      x_lmp[:] = x[0:three_n]
    except ValueError:
      return 0

    # velocities are optional; None maps to a NULL pointer
    if v:
      v_lmp = (c_double*(three_n))()
      try:
        v_lmp[:] = v[0:three_n]
      except ValueError:
        return 0
    else:
      v_lmp = None

    # encoded image flags are optional; None maps to a NULL pointer
    if image:
      img_lmp = (self.c_imageint*n)()
      try:
        img_lmp[:] = image[0:n]
      except ValueError:
        return 0
    else:
      img_lmp = None

    # convert bool to the int the C function expects
    if shrinkexceed:
      se_lmp = 1
    else:
      se_lmp = 0

    # argtypes depend on n, so they must be set per call
    self.lib.lammps_create_atoms.argtypes = [c_void_p, c_int, POINTER(self.c_tagint*n),
                                     POINTER(c_int*n), POINTER(c_double*three_n),
                                     POINTER(c_double*three_n),
                                     POINTER(self.c_imageint*n), c_int]
    with ExceptionCheck(self):
      return self.lib.lammps_create_atoms(self.lmp, n, id_lmp, type_lmp, x_lmp, v_lmp, img_lmp, se_lmp)
# -------------------------------------------------------------------------
@property
def has_mpi_support(self):
""" Report whether the LAMMPS shared library was compiled with a
real MPI library or in serial.
This is a wrapper around the :cpp:func:`lammps_config_has_mpi_support`
function of the library interface.
:return: False when compiled with MPI STUBS, otherwise True
:rtype: bool
"""
return self.lib.lammps_config_has_mpi_support() != 0
# -------------------------------------------------------------------------
@property
def is_running(self):
""" Report whether being called from a function during a run or a minimization
Various LAMMPS commands must not be called during an ongoing
run or minimization. This property allows to check for that.
This is a wrapper around the :cpp:func:`lammps_is_running`
function of the library interface.
.. versionadded:: 9Oct2020
:return: True when called during a run otherwise false
:rtype: bool
"""
return self.lib.lammps_is_running(self.lmp) == 1
# -------------------------------------------------------------------------
def force_timeout(self):
    """Trigger an immediate timeout, i.e. a "soft stop" of a run.

    This function allows to cleanly stop an ongoing run or minimization
    at the next loop iteration.
    This is a wrapper around the :cpp:func:`lammps_force_timeout`
    function of the library interface.

    .. versionadded:: 9Oct2020
    """
    self.lib.lammps_force_timeout(self.lmp)
# -------------------------------------------------------------------------
@property
def has_exceptions(self):
    """Report whether the LAMMPS shared library was compiled with C++
    exception handling enabled.

    Wrapper around :cpp:func:`lammps_config_has_exceptions` of the
    library interface.

    :return: state of C++ exception support
    :rtype: bool
    """
    return bool(self.lib.lammps_config_has_exceptions())
# -------------------------------------------------------------------------
@property
def has_gzip_support(self):
    """Report whether the LAMMPS shared library was compiled with support
    for reading and writing compressed files through ``gzip``.

    Wrapper around :cpp:func:`lammps_config_has_gzip_support` of the
    library interface.

    :return: state of gzip support
    :rtype: bool
    """
    return bool(self.lib.lammps_config_has_gzip_support())
# -------------------------------------------------------------------------
@property
def has_png_support(self):
    """Report whether the LAMMPS shared library was compiled with support
    for writing images in PNG format.

    Wrapper around :cpp:func:`lammps_config_has_png_support` of the
    library interface.

    :return: state of PNG support
    :rtype: bool
    """
    return bool(self.lib.lammps_config_has_png_support())
# -------------------------------------------------------------------------
@property
def has_jpeg_support(self):
    """Report whether the LAMMPS shared library was compiled with support
    for writing images in JPEG format.

    Wrapper around :cpp:func:`lammps_config_has_jpeg_support` of the
    library interface.

    :return: state of JPEG support
    :rtype: bool
    """
    return bool(self.lib.lammps_config_has_jpeg_support())
# -------------------------------------------------------------------------
@property
def has_ffmpeg_support(self):
    """State of support for writing movies with ``ffmpeg`` in the LAMMPS shared library.

    Wrapper around :cpp:func:`lammps_config_has_ffmpeg_support` of the
    library interface.

    :return: state of ffmpeg support
    :rtype: bool
    """
    return bool(self.lib.lammps_config_has_ffmpeg_support())
# -------------------------------------------------------------------------
@property
def accelerator_config(self):
    """Return a table with available accelerator configuration settings.

    Wrapper around :cpp:func:`lammps_config_accelerator` of the library
    interface: for every known accelerator package, query all known
    'api' and 'precision' settings and collect the enabled ones.

    :return: nested dictionary with all known enabled settings as list of strings
    :rtype: dictionary
    """
    packages = ('GPU', 'KOKKOS', 'INTEL', 'OPENMP')
    apis = ('cuda', 'hip', 'phi', 'pthreads', 'opencl', 'openmp', 'serial')
    precisions = ('double', 'mixed', 'single')

    def _enabled(pkg, category, setting):
        # The C function returns non-zero when the setting is enabled.
        return self.lib.lammps_config_accelerator(pkg.encode(),
                                                  category.encode(),
                                                  setting.encode())

    return {pkg: {'api': [s for s in apis if _enabled(pkg, 'api', s)],
                  'precision': [s for s in precisions
                                if _enabled(pkg, 'precision', s)]}
            for pkg in packages}
# -------------------------------------------------------------------------
@property
def has_gpu_device(self):
    """Availability of a GPU package compatible device.

    Wrapper around :cpp:func:`lammps_has_gpu_device` of the C library
    interface.

    :return: True if a GPU package compatible device is present, otherwise False
    :rtype: bool
    """
    return bool(self.lib.lammps_has_gpu_device())
# -------------------------------------------------------------------------
def get_gpu_device_info(self):
    """Return a string with detailed information about any devices that are
    usable by the GPU package.

    Wrapper around :cpp:func:`lammps_get_gpu_device_info` of the
    C-library interface.

    :return: GPU device info string
    :rtype: string
    """
    bufsize = 8192
    buf = create_string_buffer(bufsize)
    self.lib.lammps_get_gpu_device_info(buf, bufsize)
    return buf.value.decode()
# -------------------------------------------------------------------------
@property
def installed_packages(self):
    """List of the names of enabled packages in the LAMMPS shared library.

    Wrapper around the functions :cpp:func:`lammps_config_package_count`
    and :cpp:func:`lammps_config_package_name` of the library interface.
    The result is computed once and cached on the instance.

    :return: list of package names
    :rtype: list of strings
    """
    if self._installed_packages is None:
        count = self.lib.lammps_config_package_count()
        buf = create_string_buffer(100)
        names = []
        for i in range(count):
            self.lib.lammps_config_package_name(i, buf, 100)
            names.append(buf.value.decode())
        self._installed_packages = names
    return self._installed_packages
# -------------------------------------------------------------------------
def has_style(self, category, name):
    """Returns whether a given style name is available in a given category.

    Wrapper around the function :cpp:func:`lammps_has_style` of the
    library interface.

    :param category: name of category
    :type category: string
    :param name: name of the style
    :type name: string
    :return: True if style is available in given category
    :rtype: bool
    """
    found = self.lib.lammps_has_style(self.lmp, category.encode(), name.encode())
    return found != 0
# -------------------------------------------------------------------------
def available_styles(self, category):
    """Returns a list of styles available for a given category.

    Wrapper around the functions :cpp:func:`lammps_style_count()` and
    :cpp:func:`lammps_style_name()` of the library interface.  Results
    are cached per category on the instance.

    :param category: name of category
    :type category: string
    :return: list of style names in given category
    :rtype: list
    """
    if self._available_styles is None:
        self._available_styles = {}
    if category not in self._available_styles:
        with ExceptionCheck(self):
            count = self.lib.lammps_style_count(self.lmp, category.encode())
        buf = create_string_buffer(100)
        names = []
        for i in range(count):
            with ExceptionCheck(self):
                self.lib.lammps_style_name(self.lmp, category.encode(), i, buf, 100)
            names.append(buf.value.decode())
        self._available_styles[category] = names
    return self._available_styles[category]
# -------------------------------------------------------------------------
def has_id(self, category, name):
    """Returns whether a given ID name is available in a given category.

    Wrapper around the function :cpp:func:`lammps_has_id` of the
    library interface.

    .. versionadded:: 9Oct2020

    :param category: name of category
    :type category: string
    :param name: name of the ID
    :type name: string
    :return: True if ID is available in given category
    :rtype: bool
    """
    found = self.lib.lammps_has_id(self.lmp, category.encode(), name.encode())
    return found != 0
# -------------------------------------------------------------------------
def available_ids(self, category):
    """Returns a list of IDs available for a given category.

    Wrapper around the functions :cpp:func:`lammps_id_count()` and
    :cpp:func:`lammps_id_name()` of the library interface.  An unknown
    category yields an empty list.

    .. versionadded:: 9Oct2020

    :param category: name of category
    :type category: string
    :return: list of id names in given category
    :rtype: list
    """
    valid = ('compute', 'dump', 'fix', 'group', 'molecule', 'region', 'variable')
    ids = []
    if category in valid:
        count = self.lib.lammps_id_count(self.lmp, category.encode())
        buf = create_string_buffer(100)
        for i in range(count):
            self.lib.lammps_id_name(self.lmp, category.encode(), i, buf, 100)
            ids.append(buf.value.decode())
    return ids
# -------------------------------------------------------------------------
def available_plugins(self, category):
    """Returns a list of plugins available for a given category.

    Wrapper around the functions :cpp:func:`lammps_plugin_count()` and
    :cpp:func:`lammps_plugin_name()` of the library interface.

    .. versionadded:: 10Mar2021

    :return: list of style/name pairs of loaded plugins
    :rtype: list
    """
    # NOTE(review): the 'category' argument is accepted but not used by
    # the underlying library calls; kept for interface compatibility.
    count = self.lib.lammps_plugin_count(self.lmp)
    stybuf = create_string_buffer(100)
    nambuf = create_string_buffer(100)
    plugins = []
    for i in range(count):
        self.lib.lammps_plugin_name(i, stybuf, nambuf, 100)
        plugins.append([stybuf.value.decode(), nambuf.value.decode()])
    return plugins
# -------------------------------------------------------------------------
def set_fix_external_callback(self, fix_id, callback, caller=None):
    """Set the callback function for a fix external instance with a given fix ID.

    Optionally also set a reference to the calling object.
    This is a wrapper around the :cpp:func:`lammps_set_fix_external_callback` function
    of the C-library interface.  However this is set up to call a Python function with
    the following arguments.

    .. code-block: python

       def func(object, ntimestep, nlocal, tag, x, f):

    - object is the value of the "caller" argument
    - ntimestep is the current timestep
    - nlocal is the number of local atoms on the current MPI process
    - tag is a 1d NumPy array of integers representing the atom IDs of the local atoms
    - x is a 2d NumPy array of doubles of the coordinates of the local atoms
    - f is a 2d NumPy array of doubles of the forces on the local atoms that will be added

    .. versionchanged:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type: string
    :param callback: Python function that will be called from fix external
    :type: function
    :param caller: reference to some object passed to the callback function
    :type: object, optional
    """
    # NOTE(review): 'np' is not referenced below; the import appears vestigial.
    import numpy as np

    # Adapter invoked from C: wraps the raw pointers into NumPy views
    # before handing control to the user-supplied Python callback.
    def callback_wrapper(caller, ntimestep, nlocal, tag_ptr, x_ptr, fext_ptr):
        tag = self.numpy.iarray(self.c_tagint, tag_ptr, nlocal, 1)
        x = self.numpy.darray(x_ptr, nlocal, 3)
        f = self.numpy.darray(fext_ptr, nlocal, 3)
        callback(caller, ntimestep, nlocal, tag, x, f)

    cFunc = self.FIX_EXTERNAL_CALLBACK_FUNC(callback_wrapper)
    cCaller = caller
    # Keep a reference to the ctypes function object (and caller) so they
    # are not garbage collected while the C side still holds the pointer.
    self.callback[fix_id] = { 'function': cFunc, 'caller': caller }
    with ExceptionCheck(self):
        self.lib.lammps_set_fix_external_callback(self.lmp, fix_id.encode(), cFunc, cCaller)
# -------------------------------------------------------------------------
def fix_external_get_force(self, fix_id):
    """Get access to the array with per-atom forces of a fix external instance with a given fix ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_get_force` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :return: requested data
    :rtype: ctypes.POINTER(ctypes.POINTER(ctypes.double))
    """
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_get_force(self.lmp, fix_id.encode())
# -------------------------------------------------------------------------
def fix_external_set_energy_global(self, fix_id, eng):
    """Set the global energy contribution for a fix external instance with the given ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_energy_global` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param eng: potential energy value to be added by fix external
    :type eng: float
    """
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_energy_global(self.lmp, fix_id.encode(), eng)
# -------------------------------------------------------------------------
def fix_external_set_virial_global(self, fix_id, virial):
    """Set the global virial contribution for a fix external instance with the given ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_virial_global` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param virial: list of 6 floating point numbers with the virial to be added by fix external
    :type virial: list of float
    """
    # Marshal the 6 virial components into a C double array for the call.
    cvirial = (6*c_double)(*virial)
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_virial_global(self.lmp, fix_id.encode(), cvirial)
# -------------------------------------------------------------------------
def fix_external_set_energy_peratom(self, fix_id, eatom):
    """Set the per-atom energy contribution for a fix external instance with the given ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_energy_peratom` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param eatom: list of potential energy values for local atoms to be added by fix external
    :type eatom: list of float
    :raises ValueError: if fewer than nlocal energy values are provided
    """
    nlocal = self.extract_setting('nlocal')
    if len(eatom) < nlocal:
        # ValueError (a subclass of Exception) is more specific than the
        # former bare Exception and remains backward compatible for callers.
        raise ValueError('per-atom energy list length must be at least nlocal')
    # Copy the energies into a C double array for the library call.
    ceatom = (nlocal*c_double)(*eatom)
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_energy_peratom(self.lmp, fix_id.encode(), ceatom)
# -------------------------------------------------------------------------
def fix_external_set_virial_peratom(self, fix_id, vatom):
    """Set the per-atom virial contribution for a fix external instance with the given ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_virial_peratom` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param vatom: list of nlocal lists with 6 floating point numbers to be added by fix external
    :type vatom: list of lists of float
    :raises ValueError: if the dimensions of vatom are too small
    """
    # copy virial data to C compatible buffer
    nlocal = self.extract_setting('nlocal')
    if len(vatom) < nlocal:
        # ValueError is more specific than the former bare Exception and
        # remains backward compatible for callers catching Exception.
        raise ValueError('per-atom virial first dimension must be at least nlocal')
    vbuf = (c_double * 6)
    vptr = POINTER(c_double)
    c_virial = (vptr * nlocal)()
    for i in range(nlocal):
        # Check every row (the old code only probed vatom[0], which raised
        # an unrelated IndexError when nlocal == 0 and vatom was empty).
        if len(vatom[i]) != 6:
            raise ValueError('per-atom virial second dimension must be 6')
        row = vbuf()
        for j in range(6):
            row[j] = vatom[i][j]
        c_virial[i] = row
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_virial_peratom(self.lmp, fix_id.encode(), c_virial)
# -------------------------------------------------------------------------
def fix_external_set_vector_length(self, fix_id, length):
    """Set the vector length for a global vector stored with fix external for analysis.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_vector_length` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param length: length of the global vector
    :type length: int
    """
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_vector_length(self.lmp, fix_id.encode(), length)
# -------------------------------------------------------------------------
def fix_external_set_vector(self, fix_id, idx, val):
    """Store a global vector value for a fix external instance with the given ID.

    This is a wrapper around the :cpp:func:`lammps_fix_external_set_vector` function
    of the C-library interface.

    .. versionadded:: 28Jul2021

    :param fix_id: Fix-ID of a fix external instance
    :type fix_id: string
    :param idx: 1-based index of the value in the global vector
    :type idx: int
    :param val: value to be stored in the global vector
    :type val: float
    """
    with ExceptionCheck(self):
        return self.lib.lammps_fix_external_set_vector(self.lmp, fix_id.encode(), idx, val)
# -------------------------------------------------------------------------
def get_neighlist(self, idx):
    """Returns an instance of :class:`NeighList` which wraps access to the
    neighbor list with the given index.

    See :py:meth:`lammps.numpy.get_neighlist() <lammps.numpy_wrapper.numpy_wrapper.get_neighlist()>`
    if you want to use NumPy arrays instead of ``c_int`` pointers.

    :param idx: index of neighbor list
    :type idx: int
    :return: an instance of :class:`NeighList` wrapping access to neighbor list data,
             or None for a negative index
    :rtype: NeighList
    """
    return None if idx < 0 else NeighList(self, idx)
# -------------------------------------------------------------------------
def get_neighlist_size(self, idx):
    """Return the number of elements in the neighbor list with the given index.

    :param idx: neighbor list index
    :type idx: int
    :return: number of elements in neighbor list with index idx
    :rtype: int
    """
    count = self.lib.lammps_neighlist_num_elements(self.lmp, idx)
    return count
# -------------------------------------------------------------------------
def get_neighlist_element_neighbors(self, idx, element):
    """Return data of a neighbor list entry.

    :param idx: neighbor list index
    :type idx: int
    :param element: neighbor list element index
    :type element: int
    :return: tuple with atom local index, number of neighbors and array of neighbor local atom indices
    :rtype: (int, int, POINTER(c_int))
    """
    # Output parameters filled in by the C library call below.
    c_iatom = c_int()
    c_numneigh = c_int()
    c_neighbors = POINTER(c_int)()
    self.lib.lammps_neighlist_element_neighbors(self.lmp, idx, element, byref(c_iatom), byref(c_numneigh), byref(c_neighbors))
    return c_iatom.value, c_numneigh.value, c_neighbors
# -------------------------------------------------------------------------
def find_pair_neighlist(self, style, exact=True, nsub=0, reqid=0):
    """Find neighbor list index of pair style neighbor list.

    Search for a neighbor list requested by a pair style instance that
    matches "style". If exact is True, the pair style name must match
    exactly. If exact is False, the pair style name is matched against
    "style" as regular expression or sub-string. If the pair style is a
    hybrid pair style, the style is instead matched against the hybrid
    sub-styles. If the same pair style is used as sub-style multiple
    times, you must set nsub to a value n > 0 which indicates the nth
    instance of that sub-style to be used (same as for the pair_coeff
    command). The default value of 0 will fail to match in that case.

    Once the pair style instance has been identified, it may have
    requested multiple neighbor lists. Those are uniquely identified by
    a request ID > 0 as set by the pair style. Otherwise the request
    ID is 0.

    :param style: name of pair style that should be searched for
    :type style: string
    :param exact: controls whether style should match exactly or only must be contained in pair style name, defaults to True
    :type exact: bool, optional
    :param nsub: match nsub-th hybrid sub-style, defaults to 0
    :type nsub: int, optional
    :param reqid: list request id, > 0 in case there are more than one, defaults to 0
    :type reqid: int, optional
    :return: neighbor list index if found, otherwise -1
    :rtype: int
    """
    # The C interface expects a byte string and an integer flag.
    return self.lib.lammps_find_pair_neighlist(self.lmp, style.encode(),
                                               int(exact), nsub, reqid)
# -------------------------------------------------------------------------
def find_fix_neighlist(self, fixid, reqid=0):
    """Find neighbor list index of fix neighbor list.

    The fix instance requesting the neighbor list is uniquely identified
    by the fix ID. In case the fix has requested multiple neighbor
    lists, those are uniquely identified by a request ID > 0 as set by
    the fix. Otherwise the request ID is 0 (the default).

    :param fixid: name of fix
    :type fixid: string
    :param reqid: id of neighbor list request, in case there are more than one request, defaults to 0
    :type reqid: int, optional
    :return: neighbor list index if found, otherwise -1
    :rtype: int
    """
    return self.lib.lammps_find_fix_neighlist(self.lmp, fixid.encode(), reqid)
# -------------------------------------------------------------------------
def find_compute_neighlist(self, computeid, reqid=0):
    """Find neighbor list index of compute neighbor list.

    The compute instance requesting the neighbor list is uniquely
    identified by the compute ID. In case the compute has requested
    multiple neighbor lists, those are uniquely identified by a request
    ID > 0 as set by the compute. Otherwise the request ID is 0 (the
    default).

    :param computeid: name of compute
    :type computeid: string
    :param reqid: index of neighbor list request, in case there are more than one request, defaults to 0
    :type reqid: int, optional
    :return: neighbor list index if found, otherwise -1
    :rtype: int
    """
    return self.lib.lammps_find_compute_neighlist(self.lmp, computeid.encode(), reqid)
|
jeremiahyan/lammps
|
python/lammps/core.py
|
Python
|
gpl-2.0
| 78,731
|
[
"LAMMPS"
] |
114eaf91c0a83481fc7651dfac2cf4d08cf53b04ea41c4486ec5b67b60ec6ae7
|
import PythonQt
from PythonQt import QtGui, QtCore
from director import pointpicker
from director import vtkNumpy as vnp
from director import vtkAll as vtk
import numpy as np
class ImageView(object):
    """A Qt/VTK widget that displays a vtkImageData in a 2D
    parallel-projection view, with rubber-band picking, keyboard
    shortcuts ('r' to reset camera) and NumPy image upload support.
    """

    def __init__(self):
        # When True, the camera is reset automatically on window resize.
        self.autoResetCamera = False
        self.view = PythonQt.dd.ddQVTKWidgetView()
        self.view.setWindowTitle('Image View')
        self.imageActor = vtk.vtkImageActor()
        # Start with an empty image; replaced later via setImage/showNumpyImage.
        self.setImage(vtk.vtkImageData())
        self.view.renderer().AddActor(self.imageActor)
        self.view.orientationMarkerWidget().Off()
        self.setBackgroundColor([0,0,0])
        self.initInteractor()
        self.installEventFilter()
        self.resetCamera()

    def installEventFilter(self):
        # Intercept double-click, key-press and resize events on the
        # underlying QVTK widget and route them through filterEvent().
        self.eventFilter = PythonQt.dd.ddPythonEventFilter()
        qvtkwidget = self.view.vtkWidget()
        qvtkwidget.installEventFilter(self.eventFilter)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.Resize)
        self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)

    def initInteractor(self):
        # Use the image-style interactor and listen for rubber-band picks.
        self.view.installImageInteractor()
        self.interactorStyle = self.view.renderWindow().GetInteractor().GetInteractorStyle()
        self.interactorStyle.AddObserver('SelectionChangedEvent', self.onRubberBandPick)

    def initPointPicker(self):
        # Not called from __init__; enable point picking on demand.
        self.pointPicker = pointpicker.ImagePointPicker(self, callback=self.onPickedPoints)
        self.pointPicker.start()

    def onPickedPoints(self, *points):
        # NOTE(review): pickedPoints is only created here, so it does not
        # exist until the first pick callback fires.
        self.pickedPoints = points

    def onRubberBandPick(self, obj, event):
        # Convert the rubber-band start/end display coordinates to image pixels.
        displayPoints = self.interactorStyle.GetStartPosition(), self.interactorStyle.GetEndPosition()
        self.rubberBandPickPoints = [self.getImagePixel(p) for p in displayPoints]

    def setBackgroundColor(self, color):
        self.view.renderer().SetBackground(color)
        self.view.renderer().SetBackground2(color)

    def getImagePixel(self, displayPoint, restrictToImageDimensions=True):
        """Map a display (screen) point to image pixel coordinates, or
        return None when outside the image and restriction is enabled."""
        worldPoint = [0.0, 0.0, 0.0, 0.0]
        vtk.vtkInteractorObserver.ComputeDisplayToWorld(self.view.renderer(), displayPoint[0], displayPoint[1], 0, worldPoint)
        imageDimensions = self.getImage().GetDimensions()
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # (inside-x and inside-y) or (not restrict) — presumably intended.
        if 0.0 <= worldPoint[0] <= imageDimensions[0] and 0.0 <= worldPoint[1] <= imageDimensions[1] or not restrictToImageDimensions:
            return [worldPoint[0], worldPoint[1], 0.0]
        else:
            return None

    def resizeView(self, scale=1.0):
        # Resize the widget to the image dimensions (optionally scaled).
        image = self.getImage()
        assert image
        width, height, _ = image.GetDimensions()
        assert width > 0 and height > 0
        self.view.resize(width * scale, height * scale)
        self.resetCamera()

    def show(self):
        if not self.view.isVisible() and self.getImage():
            self.resizeView()
        self.view.show()

    def filterEvent(self, obj, event):
        # Swallow double-clicks, optionally reset the camera on resize,
        # and handle 'p'/'r' keyboard shortcuts.
        if event.type() == QtCore.QEvent.MouseButtonDblClick:
            self.eventFilter.setEventHandlerResult(True)
        elif event.type() == QtCore.QEvent.Resize:
            if self.autoResetCamera:
                self.resetCamera()
        elif event.type() == QtCore.QEvent.KeyPress:
            if str(event.text()).lower() == 'p':
                # 'p' is consumed but currently triggers no action.
                self.eventFilter.setEventHandlerResult(True)
            elif str(event.text()).lower() == 'r':
                self.eventFilter.setEventHandlerResult(True)
                self.resetCamera()

    def setImage(self, image):
        if image != self.getImage():
            self.imageActor.SetInputData(image)
            self.resetCamera()

    def getImage(self):
        return self.imageActor.GetInput()

    def resetCamera(self):
        # Look straight at the image plane with a parallel projection.
        camera = self.view.camera()
        camera.ParallelProjectionOn()
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,0,1)
        camera.SetViewUp(0,1,0)
        self.view.resetCamera()
        self.fitImageToView()
        self.view.render()

    def fitImageToView(self):
        # Choose a parallel scale so the whole image fits the viewport.
        viewWidth, viewHeight = self.view.renderWindow().GetSize()
        if viewHeight == 0:
            return
        camera = self.view.camera()
        image = self.getImage()
        imageWidth, imageHeight, _ = image.GetDimensions()
        aspectRatio = float(viewWidth)/viewHeight
        parallelScale = max(imageWidth/aspectRatio, imageHeight) / 2.0
        camera.SetParallelScale(parallelScale)

    def showNumpyImage(self, img, flip=True):
        """Upload a (height, width[, channels]) uint8-like NumPy array into
        the displayed vtkImageData.  flip=True flips vertically to match
        VTK's bottom-left image origin — TODO confirm origin convention."""
        image = self.getImage()
        if not image:
            image = vtk.vtkImageData()
            self.setImage(image)
        if flip:
            img = np.flipud(img)
        height, width, numChannels = img.shape
        dims = image.GetDimensions()
        # (Re)allocate the VTK image only when shape or channel count changed.
        if dims[0] != width or dims[1] != height or image.GetNumberOfScalarComponents() != numChannels:
            image.SetDimensions(width, height, 1)
            image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, numChannels)
        scalars = vnp.getNumpyFromVtk(image, 'ImageScalars')
        if numChannels > 1:
            scalars[:] = img.reshape(width*height, numChannels)[:]
        else:
            scalars[:] = img.reshape(width*height)[:]
        image.Modified()
        self.view.render()
|
patmarion/director
|
src/python/director/imageview.py
|
Python
|
bsd-3-clause
| 5,326
|
[
"VTK"
] |
8b749d4a21fb5482e08ad70d7d8b66d23589eb98b669d9bc595fc4826fe67730
|
# This file is part of MOOSE simulator: http://moose.ncbs.res.in.
# MOOSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MOOSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
"""
Description: class NetworkML for loading NetworkML from file or xml element
into MOOSE
Version 1.0 by Aditya Gilra, NCBS, Bangalore, India, 2011 for serial MOOSE
Version 1.5 by Niraj Dudani, NCBS, Bangalore, India, 2012, ported to parallel
MOOSE
Version 1.6 by Aditya Gilra, NCBS, Bangalore, India, 2012, further changes for
parallel MOOSE
Version 1.7 by Aditya Gilra, NCBS, Bangalore, India, 2013, further support for
NeuroML 1.8.1
NeuroML.py is the preferred interface. Use this only if NeuroML L1,L2,L3
files are misnamed/scattered. Instantiate NetworlML class, and thence use
method: readNetworkMLFromFile(...) to load a standalone NetworkML file, OR
readNetworkML(...) to load from an xml.etree xml element (could be part of a
larger NeuroML file).
"""
from xml.etree import cElementTree as ET
import string
import os
import sys
import MorphML
import ChannelML
import moose
import moose.neuroml.utils as nmu
import moose.utils as utils
import inspect
import helper.xml_methods as xml_methods
import helper.moose_methods as moose_methods
import core.stimulus as stimulus
import core.config as config
import debug.bugs as bugs
import matplotlib.pyplot as plt
import numpy as np
import re
from math import cos, sin
class NetworkML(object):
def __init__(self, nml_params):
    """Initialize reader state and create the moose container paths.

    :param nml_params: reader parameters; must contain 'model_dir'
        (directory of the NeuroML model files).
    """
    self.populationDict = dict()
    self.libraryPath = config.libraryPath
    moose.Neutral(self.libraryPath)
    self.cellPath = self.libraryPath
    moose.Neutral(self.cellPath)
    # NOTE(review): duplicate of the moose.Neutral(self.libraryPath) call
    # above; presumably harmless since Neutral re-returns an existing path.
    moose.Neutral(self.libraryPath)
    self.cellDictBySegmentId={}
    self.cellDictByCableId={}
    self.nml_params = nml_params
    self.modelDir = nml_params['model_dir']
    self.elecPath = config.elecPath
    self.dt = 1e-3 # In seconds.
    self.simTime = 1000e-3
    # Directory where stimulus plots are written by plotVector().
    self.plotPaths = 'figs'
def connectWrapper(self, src, srcF, dest, destF, messageType='Single'):
    """
    Wrapper around moose.connect that logs the connection attempt and
    raises if the connection could not be established.
    """
    utils.dump("INFO"
               , "Connecting ({4})`\n\t{0},{1}`\n\t`{2},{3}".format(
                   src.path, srcF, dest.path, destF, messageType)
               , frame=inspect.currentframe()
               )
    try:
        result = moose.connect(src, srcF, dest, destF, messageType)
        assert result, "Failed to connect"
    except Exception as e:
        utils.dump("ERROR", "Failed to connect.")
        raise e
def plotVector(self, vector, plotname):
    """Save the given stimulus vector as vertical lines in an EPS file
    under self.plotPaths."""
    safeName = plotname.replace('/', '_')
    outFile = os.path.join(self.plotPaths, safeName) + '.eps'
    utils.dump("DEBUG"
               , "Saving vector to a file {}".format(outFile)
               )
    plt.vlines(vector, 0, 1)
    plt.savefig(outFile)
def readNetworkMLFromFile(self, filename, cellSegmentDict, params={}):
    """ readNetworkML

    Parse a standalone NetworkML file, apply the tweak parameters, and
    load the model into MOOSE via readNetworkML().

    specify tweak
    params = {
        'excludePopulations':[popname1,...]
        , 'excludeProjections':[projname1,...]
        , 'onlyInclude':{'includePopulation':(popname,[id1,...])
            ,'includeProjections':(projname1,...)
            }
        }

    If excludePopulations is present, then excludeProjections must also be
    present. Thus if you exclude some populations, ensure that you exclude
    projections that refer to those populations also! Though for
    onlyInclude, you may specify only included cells and this reader will
    also keep cells connected to those in onlyInclude. This reader first
    prunes the exclude-s, then keeps the onlyInclude-s and those that are
    connected. Use 'includeProjections' if you want to keep some
    projections not connected to the primary 'includePopulation' cells but
    connected to secondary cells that connected to the primary ones: e.g.
    baseline synapses on granule cells connected to 'includePopulation'
    mitrals; these synapses receive file based pre-synaptic events, not
    presynaptically connected to a cell.

    NOTE(review): the mutable default params={} is shared across calls;
    safe only as long as callers never mutate it.
    """
    utils.dump("INFO", "reading file %s ... " % filename)
    tree = ET.parse(filename)
    root_element = tree.getroot()
    utils.dump("INFO", "Tweaking model ... ")
    utils.tweak_model(root_element, params)
    utils.dump("INFO", "Loading model into MOOSE ... ")
    # The root element carries the length unit used by the whole file.
    return self.readNetworkML(
        root_element
        , cellSegmentDict
        , params
        , root_element.attrib['lengthUnits']
    )
def readNetworkML(self, network, cellSegmentDict , params={}
                  , lengthUnits="micrometer"):
    """
    Load a NetworkML xml element into MOOSE: populations, projections and
    inputs (in that order).

    This returns
    populationDict = {
        'populationName1':(cellname
            , {int(instanceid1):moosecell, ...F}) , ...
        }
    projectionDict = {
        'projectionName1':(source,target
            ,[(syn_name1,pre_seg_path,post_seg_path),...])
        , ...
        }
    """
    # Convert model coordinates to SI meters.
    if lengthUnits in ['micrometer','micron']:
        self.length_factor = 1e-6
    else:
        self.length_factor = 1.0
    self.network = network
    self.cellSegmentDict = cellSegmentDict
    self.params = params
    utils.dump("STEP", "Creating populations ... ")
    self.createPopulations()
    utils.dump("STEP", "Creating connections ... ")
    self.createProjections()
    # create connections
    utils.dump("STEP", "Creating inputs in %s .. " % self.elecPath)
    # create inputs (only current pulse supported)
    self.createInputs()
    # NOTE(review): self.projectionDict is presumably populated by
    # createProjections() (not visible here) — verify before relying on it.
    return (self.populationDict, self.projectionDict)
def createInputs(self):
    """ createInputs

    Create inputs as given in NML file and attach them to moose.
    """
    for inputs in self.network.findall(".//{"+nmu.nml_ns+"}inputs"):
        units = inputs.attrib['units']
        # Unit conversion factors handed to attachInputToMoose; see
        # pg 219 (sec 13.2) of Book of Genesis.
        if units == 'Physiological Units':
            # V from mV, s from ms, A from microA.
            factors = {'Vfactor': 1e-3, 'Tfactor': 1e-3, 'Ifactor': 1e-6}
        else:
            utils.dump("NOTE", "We got {0}".format(units))
            factors = {'Vfactor': 1.0, 'Tfactor': 1.0, 'Ifactor': 1.0}
        for inputElem in inputs.findall(".//{"+nmu.nml_ns+"}input"):
            self.attachInputToMoose(inputElem, factors)
    def attachInputToMoose(self, inElemXml, factors, savePlot=True):
        """Attach one NML <input> element to MOOSE.

        Two stimulus kinds are handled: <random_stim> (a Poisson spike train
        fed through a TimeTable into a SynChan on each target site) and
        <pulse_input> (a PulseGen driving a DiffAmp current clamp injected
        into each target compartment).

        * inElemXml -- the <input> XML element.
        * factors -- dict with 'Vfactor', 'Tfactor', 'Ifactor' unit scalers
          (see createInputs).
        * savePlot -- when True, the generated spike vector is also plotted
          via self.plotVector.
        """
        # If /elec doesn't exists it creates /elec and returns a reference to
        # it. If it does, it just returns its reference.
        moose.Neutral(self.elecPath)
        inputName = inElemXml.get('name')
        random_stim = inElemXml.find('.//{'+nmu.nml_ns+'}random_stim')
        pulse_stim = inElemXml.find('.//{'+nmu.nml_ns+'}pulse_input')
        if random_stim is not None:
            utils.dump("INFO", "Generating random stimulous")
            utils.dump("TODO", "Test this Poission spike train table")
            # Get the frequency of stimulus
            frequency = moose_methods.toFloat(
                    random_stim.get('frequency', '1.0')
                    ) / factors['Tfactor']
            # NOTE(review): amplitude and synpMechanism are read but never
            # used below -- confirm whether they should influence the table.
            amplitude = random_stim.get('amplitude', 1.0)
            synpMechanism = random_stim.get('synaptic_mechanism')
            # Create random stimulus
            vec = stimulus.generateSpikeTrainPoission(frequency
                    , dt=self.dt
                    , simTime=self.simTime
                    )
            # Stimulus table
            tablePath = os.path.join(self.elecPath, "Stimulus")
            moose.Neutral(tablePath)
            stimPath = os.path.join(tablePath, inputName)
            stim = moose.TimeTable(stimPath)
            stim.vec = vec
            if savePlot:
                self.plotVector(vec, tablePath)
            target = inElemXml.find(".//{"+nmu.nml_ns+"}target")
            population = target.get('population')
            for site in target.findall(".//{"+nmu.nml_ns+"}site"):
                cell_id = site.attrib['cell_id']
                if 'segment_id' in site.attrib:
                    segment_id = site.attrib['segment_id']
                else:
                    # default segment_id is specified to be 0
                    segment_id = 0
                # To find the cell name fetch the first element of tuple.
                cell_name = self.populationDict[population][0]
                if cell_name == 'LIF':
                    utils.dump("NOTE",
                            "LIF cell_name. Partial implementation"
                            , frame = inspect.currentframe()
                            )
                    LIF = self.populationDict[population][1][int(cell_id)]
                    m = self.connectSynapse(stim, LIF)
                else:
                    # cellSegmentDict is keyed by string segment ids, hence
                    # the explicit formatting of segment_id here.
                    segId = '{0}'.format(segment_id)
                    segment_path = self.populationDict[population][1]\
                            [int(cell_id)].path + '/' + \
                            self.cellSegmentDict[cell_name][segId][0]
                    compartment = moose.Compartment(segment_path)
                    synchan = moose.SynChan(
                            os.path.join(compartment.path , '/synchan')
                            )
                    synchan.Gbar = 1e-6
                    synchan.Ek = 0.0
                    self.connectWrapper(synchan, 'channel', compartment, 'channel')
                    synchan.numSynapses = 1
                    m = self.connectSynapse(stim, moose.element(synchan.path+'/synapse'))
        elif pulse_stim is not None:
            Ifactor = factors['Ifactor']
            Tfactor = factors['Tfactor']
            pulseinput = inElemXml.find(".//{"+nmu.nml_ns+"}pulse_input")
            if pulseinput is None:
                utils.dump("WARN"
                        , "This type of stimulous is not supported."
                        , frame = inspect.currentframe()
                        )
                return
            self.pulseGenPath = self.elecPath + '/PulseGen'
            moose.Neutral(self.pulseGenPath)
            pulseGenPath = '{}/{}'.format(self.pulseGenPath, inputName)
            pulsegen = moose.PulseGen(pulseGenPath)
            icClampPath = '{}/{}'.format(self.elecPath, 'iClamp')
            moose.Neutral(icClampPath)
            # DiffAmp acts as a current clamp: PulseGen output -> plusIn.
            iclamp = moose.DiffAmp('{}/{}'.format(icClampPath, inputName))
            iclamp.saturation = 1e6
            iclamp.gain = 1.0
            # free run
            pulsegen.trigMode = 0
            pulsegen.baseLevel = 0.0
            pulsegen.firstDelay = float(pulseinput.attrib['delay'])*Tfactor
            pulsegen.firstWidth = float(pulseinput.attrib['duration'])*Tfactor
            pulsegen.firstLevel = float(pulseinput.attrib['amplitude'])*Ifactor
            # to avoid repeat
            pulsegen.secondDelay = 1e6
            pulsegen.secondLevel = 0.0
            pulsegen.secondWidth = 0.0
            # do not set count to 1, let it be at 2 by default else it will
            # set secondDelay to 0.0 and repeat the first pulse!
            #pulsegen.count = 1
            self.connectWrapper(pulsegen,'output', iclamp, 'plusIn')
            # Attach targets
            target = inElemXml.find(".//{"+nmu.nml_ns+"}target")
            population = target.attrib['population']
            for site in target.findall(".//{"+nmu.nml_ns+"}site"):
                cell_id = site.attrib['cell_id']
                if 'segment_id' in site.attrib:
                    segment_id = site.attrib['segment_id']
                else:
                    # default segment_id is specified to be 0
                    segment_id = 0
                # population is populationName,
                # self.populationDict[population][0] is cellname
                cell_name = self.populationDict[population][0]
                if cell_name == 'LIF':
                    debug.printDebut("TODO"
                            , "Rewrite this section"
                            , frame = inspect.currentframe()
                            )
                    continue
                    # NOTE(review): the two statements below are unreachable
                    # because of the `continue` above -- the LIF injection is
                    # effectively disabled here. Confirm before enabling.
                    LIF = self.populationDict[population][1][int(cell_id)]
                    self.connectWrapper(iclamp,'output',LIF,'injectMsg')
                else:
                    # NOTE(review): here segment_id (possibly int 0) is used
                    # directly as key, unlike the string segId used in the
                    # random_stim branch -- verify key type in cellSegmentDict.
                    segment_path = self.populationDict[population][1]\
                            [int(cell_id)].path+'/'+\
                            self.cellSegmentDict[cell_name][segment_id][0]
                    compartment = moose.Compartment(segment_path)
                    self.connectWrapper(iclamp
                            ,'output'
                            , compartment
                            ,'injectMsg'
                            )
    def createPopulations(self):
        """Create every <population> of the network in MOOSE and fill
        self.populationDict.

        Each entry maps populationName -> (cellname, {instance id (int):
        moose cell}). Cell prototypes missing from the library are loaded
        from `<cellname>.xml` or `<cellname>.morph.xml` via MorphML, and
        each instance is placed with translate_rotate.
        """
        populations = self.network.findall(".//{"+nmu.nml_ns+"}population")
        if not populations:
            utils.dump("WARN"
                    , [
                        "No population find in model"
                        , "Searching in namespace {}".format(nmu.nml_ns)
                        ]
                    , frame = inspect.currentframe()
                    )
        for population in populations:
            cellname = population.attrib["cell_type"]
            populationName = population.attrib["name"]
            utils.dump("INFO"
                    , "Loading population `{0}`".format(populationName)
                    )
            # if cell does not exist in library load it from xml file
            if not moose.exists(self.libraryPath+'/'+cellname):
                utils.dump("DEBUG"
                        , "Searching in subdirectories for cell types" +
                                " in `{0}.xml` and `{0}.morph.xml` ".format(cellname)
                        )
                mmlR = MorphML.MorphML(self.nml_params)
                model_filenames = (cellname+'.xml', cellname+'.morph.xml')
                success = False
                # First matching morphology file wins.
                for modelFile in model_filenames:
                    model_path = nmu.find_first_file(modelFile
                            , self.modelDir
                            )
                    if model_path is not None:
                        cellDict = mmlR.readMorphMLFromFile(model_path)
                        success = True
                        break
                if not success:
                    raise IOError(
                        'For cell {0}: files {1} not found under {2}.'.format(
                            cellname, model_filenames, self.modelDir
                            )
                        )
                self.cellSegmentDict.update(cellDict)
            # LIF cells get a LeakyIaF prototype; everything else is a Neuron.
            if cellname == 'LIF':
                cellid = moose.LeakyIaF(self.libraryPath+'/'+cellname)
            else:
                # added cells as a Neuron class.
                cellid = moose.Neuron(self.libraryPath+'/'+cellname)
            self.populationDict[populationName] = (cellname,{})
            for instance in population.findall(".//{"+nmu.nml_ns+"}instance"):
                instanceid = instance.attrib['id']
                location = instance.find('./{'+nmu.nml_ns+'}location')
                rotationnote = instance.find('./{'+nmu.meta_ns+'}notes')
                if rotationnote is not None:
                    # the text in rotationnote is zrotation=xxxxxxx
                    # NOTE: string.split(...) is Python 2 only.
                    zrotation = float(string.split(rotationnote.text,'=')[1])
                else:
                    zrotation = 0
                if cellname == 'LIF':
                    cell = moose.LeakyIaF(cellid)
                    self.populationDict[populationName][1][int(instanceid)] = cell
                else:
                    # No Cell class in MOOSE anymore! :( addded Neuron class -
                    # Chaitanya
                    cell = moose.Neuron(cellid)
                    self.populationDict[populationName][1][int(instanceid)] = cell
                # Coordinates in the XML are scaled to metres.
                x = float(location.attrib['x']) * self.length_factor
                y = float(location.attrib['y']) * self.length_factor
                z = float(location.attrib['z']) * self.length_factor
                self.translate_rotate(cell, x, y, z, zrotation)
# recursively translate all compartments under obj
def translate_rotate(self,obj,x,y,z,ztheta):
for childId in obj.children:
childobj = moose.Neutral(childId)
# if childobj is a compartment or symcompartment translate, else
# skip it
if childobj.className in ['Compartment','SymCompartment']:
# SymCompartment inherits from Compartment, so below wrapping by
# Compartment() is fine for both Compartment and SymCompartment
child = moose.Compartment(childId)
x0 = child.x0
y0 = child.y0
x0new = x0 * cos(ztheta) - y0 * sin(ztheta)
y0new = x0 * sin(ztheta) + y0 * cos(ztheta)
child.x0 = x0new + x
child.y0 = y0new + y
child.z0 += z
x1 = child.x
y1 = child.y
x1new = x1 * cos(ztheta) - y1 * sin(ztheta)
y1new = x1 * sin(ztheta) + y1 * cos(ztheta)
child.x = x1new + x
child.y = y1new + y
child.z += z
if len(childobj.children)>0:
# recursive translation+rotation
self.translate_rotate(childobj
, x
, y
, z
, ztheta
)
def getCellPath(self, populationType, instanceId):
''' Given a population type and instanceId, return its path '''
try:
path = self.populationDict[populationType][1]
except KeyError as e:
utils.dump("ERROR"
, [ "Population type `{0}` not found".format(populationType)
, "Availale population in network are "
, self.populationDict.keys()
]
)
raise KeyError("Missing population type : {}".format(populationType))
except Exception as e:
raise e
try:
path = path[instanceId]
except KeyError as e:
msg = "Population type {} has no instance {}".format(
populationType
, instanceId
)
utils.dump("ERROR"
, [msg , "Available instances are" , path.keys() ]
)
raise KeyError(msg)
# Now get the path from moose path
path = path.path
if not re.match(r'(\/\w+)+', path):
raise UserWarning("{} is not a valid path".format(path))
return path
def addConnection(self, connection, projection, options):
"""
This function adds connection
"""
synName = options['syn_name']
source = options['source']
target = options['target']
weight = options['weight']
threshold = options['threshold']
propDelay = options['prop_delay']
projectionName = projection.attrib['name']
pre_cell_id = connection.attrib['pre_cell_id']
post_cell_id = connection.attrib['post_cell_id']
if 'file' not in pre_cell_id:
# source could be 'mitrals', self.populationDict[source][0] would be
# 'mitral'
pre_cell_id = int(pre_cell_id)
post_cell_id = int(post_cell_id)
pre_cell_name = self.populationDict[source][0]
pre_segment_id = connection.attrib.get('pre_segment_id', 0)
pre_segment_path = "{0}/{1}".format(
self.getCellPath(source, pre_cell_id)
, self.cellSegmentDict[pre_cell_name][pre_segment_id][0]
)
else:
# I've removed extra excitation provided via files, so below comment
# doesn't apply. 'file[+<glomnum>]_<filenumber>' # glomnum is for
# mitral_granule extra excitation from unmodelled sisters.
pre_segment_path = "{}_{}".format(
pre_cell_id
, connection.attrib['pre_segment_id']
)
# target could be 'PGs', self.populationDict[target][0] would be 'PG'
post_cell_name = self.populationDict[target][0]
post_segment_id = connection.attrib.get('post_segment_id', '0')
post_segment_path = "{}/{}".format(
self.getCellPath(target, post_cell_id)
, self.cellSegmentDict[post_cell_name][post_segment_id][0]
)
try:
self.projectionDict[projectionName][2].append(
(synName , pre_segment_path, post_segment_path)
)
except KeyError as e:
utils.dump("ERR", "Failed find key {0}".format(e)
, frame = inspect.currentframe())
print self.projectionDict.keys()
sys.exit(0)
properties = connection.findall('./{'+nmu.nml_ns+'}properties')
if len(properties) == 0:
self.connectUsingSynChan(synName, pre_segment_path, post_segment_path
, weight, threshold, propDelay
)
else:
[self.addProperties(pre_segment_path, post_segment_path, p, options)
for p in properties]
def addProperties(self, pre_segment_path, post_segment_path, props, options):
'''Add properties
'''
synName = options['syn_name']
source = options['source']
target = options['target']
weight = options['weight']
threshold = options['threshold']
propDelay = options['prop_delay']
synapse = props.attrib.get('synapse_type', None)
if not synapse:
utils.dump("WARN"
, "Synapse type {} not found.".format(synapse)
, frame = inspect.currentframe()
)
raise UserWarning("Missing parameter synapse_type")
synName = synapse
weight_override = float(props.attrib['weight'])
if 'internal_delay' in props.attrib:
delay_override = float(props.attrib['internal_delay'])
else: delay_override = propDelay
if weight_override != 0.0:
self.connectUsingSynChan(synName
, pre_segment_path
, post_segment_path
, weight_override
, threshold, delay_override
)
else: pass
def createProjections(self):
self.projectionDict={}
projections = self.network.find(".//{"+nmu.nml_ns+"}projections")
if projections is not None:
if projections.attrib["units"] == 'Physiological Units':
# see pg 219 (sec 13.2) of Book of Genesis
self.Efactor = 1e-3 # V from mV
self.Tfactor = 1e-3 # s from ms
else:
self.Efactor = 1.0
self.Tfactor = 1.0
[self.createProjection(p) for p in
self.network.findall(".//{"+nmu.nml_ns+"}projection")]
def createProjection(self, projection):
projectionName = projection.attrib["name"]
utils.dump("INFO", "Projection {0}".format(projectionName))
source = projection.attrib["source"]
target = projection.attrib["target"]
self.projectionDict[projectionName] = (source,target,[])
# TODO: Assuming that only one element <synapse_props> under
# <projection> element.
synProps = projection.find(".//{"+nmu.nml_ns+"}synapse_props")
options = self.addSyapseProperties(projection, synProps, source, target)
connections = projection.findall(".//{"+nmu.nml_ns+"}connection")
[self.addConnection(c, projection, options) for c in connections]
def addSyapseProperties(self, projection, syn_props, source, target):
'''Add Synapse properties'''
synName = syn_props.attrib['synapse_type']
## if synapse does not exist in library load it from xml file
if not moose.exists(os.path.join(self.libraryPath, synName)):
cmlR = ChannelML.ChannelML(self.nml_params)
modelFileName = synName+'.xml'
model_path = nmu.find_first_file(modelFileName
, self.modelDir
)
if model_path is not None:
cmlR.readChannelMLFromFile(model_path)
else:
msg = 'For mechanism {0}: files {1} not found under {2}.'.format(
synName, modelFileName, self.modelDir
)
raise UserWarning(msg)
weight = float(syn_props.attrib['weight'])
threshold = float(syn_props.attrib['threshold']) * self.Efactor
if 'prop_delay' in syn_props.attrib:
propDelay = float(syn_props.attrib['prop_delay']) * self.Tfactor
elif 'internal_delay' in syn_props.attrib:
propDelay = float(syn_props.attrib['internal_delay']) * self.Tfactor
else:
propDelay = 0.0
options = { 'syn_name' : synName , 'weight' : weight
, 'source' : source , 'target' : target
, 'threshold' : threshold , 'prop_delay' : propDelay
}
return options
    def connectSynapse(self, spikegen, synapse):
        ''' Connect a spike source to a synaptic channel.

        Wires `spikegen` spikeOut -> synapse.synapse.vec addSpike via a
        Sparse message (connectivity fixed to 1.0 below), after first
        driving the spike generator with a fixed PulseGen. Returns the
        created Sparse message.

        NOTE(review): the assert demands a SynChan, but at least one caller
        (connectUsingSynChan, file branch) passes `syn.synapse` instead --
        confirm which element type is actually intended here.
        '''
        assert isinstance(synapse, moose.SynChan), type(synapse)
        #utils.dump("INFO"
        #        , "Connecting ({})\n\t`{}`\n\t`{}`".format(
        #            "Sparse"
        #            , spikegen.path
        #            , synapse.vec.path
        #            )
        #        , frame = inspect.currentframe()
        #        )
        # Following 6 lines are from snippet Izhikevich_with_synapse.py file. I
        # am not sure whether this is the right way to make a synpase. However,
        # let's try this till we get a non-zero result after simulation.
        spikeStim = moose.PulseGen('%s/spike_stim' % (synapse.parent.path))
        spikeStim.delay[0] = 50.0
        spikeStim.level[0] = 1.0
        spikeStim.width[0] = 100.0
        moose.connect(spikeStim, 'output', spikegen, 'Vm')
        m = moose.connect(spikegen, "spikeOut"
                , synapse.synapse.vec, "addSpike"
                , "Sparse"
                )
        m.setRandomConnectivity(1.0, 1)
        return m
    def connectUsingSynChan(self, synName, prePath, post_path
            , weight, threshold, delay
            ):
        """
        Connect two compartments using SynChan.

        * synName -- name of the synapse mechanism in the library.
        * prePath -- pre-synaptic compartment path, or a 'file..._<nums>'
          token for file-driven (TimeTable) sources.
        * post_path -- post-synaptic compartment path.
        * weight/threshold/delay -- synaptic parameters; for graded and
          KinSynChan synapses the weight is folded into Gbar instead.
        """
        postcomp = moose.Compartment(post_path)
        # We usually try to reuse an existing SynChan - event based SynChans
        # have an array of weights and delays and can represent multiple
        # synapses i.e. a new element of the weights and delays array is
        # created every time a 'synapse' message connects to the SynChan (from
        # 'event' of spikegen) BUT for a graded synapse with a lookup table
        # output connected to 'activation' message, not to 'synapse' message, we
        # make a new synapse everytime ALSO for a saturating synapse i.e.
        # KinSynChan, we always make a new synapse as KinSynChan is not meant to
        # represent multiple synapses
        libsyn = moose.SynChan(self.libraryPath+'/'+synName)
        gradedchild = utils.get_child_Mstring(libsyn, 'graded')
        # create a new synapse
        if libsyn.className == 'KinSynChan' or gradedchild.value == 'True':
            synNameFull = moose_methods.moosePath(synName
                    , utils.underscorize(prePath)
                    )
            synObj = self.makeNewSynapse(synName, postcomp, synNameFull)
        else:
            # See debug/bugs for more details.
            # NOTE: Change the debug/bugs to enable/disable this bug.
            if bugs.BUG_NetworkML_500:
                utils.dump("INFO"
                        , "See the code. There might be a bug here"
                        , frame = inspect.currentframe()
                        )
                synNameFull = moose_methods.moosePath(synName
                        , utils.underscorize(prePath)
                        )
                synObj = self.makeNewSynapse(synName, postcomp, synNameFull)
            else: # If the above bug is fixed.
                synNameFull = synName
                if not moose.exists(post_path+'/'+synNameFull):
                    synObj = self.makeNewSynapse(synName, postcomp, synNameFull)
        # wrap the synapse in this compartment
        synPath = moose_methods.moosePath(post_path, synNameFull)
        syn = moose.SynChan(synPath)
        gradedchild = utils.get_child_Mstring(syn, 'graded')
        # weights are set at the end according to whether the synapse is graded
        # or event-based
        # connect pre-comp Vm (if graded) OR spikegen/timetable (if event-based)
        # to the synapse
        # graded synapse
        if gradedchild.value=='True':
            table = moose.Table(syn.path+"/graded_table")
            # always connect source to input - else 'cannot create message'
            # error.
            precomp = moose.Compartment(prePath)
            self.connectWrapper(precomp, "VmOut", table, "msgInput")
            # since there is no weight field for a graded synapse
            # (no 'synapse' message connected),
            # I set the Gbar to weight*Gbar
            syn.Gbar = weight * syn.Gbar
        # Event based synapse
        else:
            # synapse could be connected to spikegen at pre-compartment OR a
            # file!
            if 'file' not in prePath:
                precomp = moose.Compartment(prePath)
                if not moose.exists(prePath+'/IaF_spikegen'):
                    # if spikegen for this synapse doesn't exist in this
                    # compartment, create it spikegens for different synapse_types
                    # can have different thresholds
                    if not moose.exists(prePath+'/'+synName+'_spikegen'):
                        spikegen = moose.SpikeGen(prePath+'/'+synName+'_spikegen')
                        # spikegens for different synapse_types can have different
                        # thresholds
                        spikegen.threshold = threshold
                        # This ensures that spike is generated only on leading edge.
                        spikegen.edgeTriggered = 1
                        # usually events are raised at every time step that Vm >
                        # Threshold, can set either edgeTriggered as above or
                        # refractT
                        #spikegen.refractT = 0.25e-3
                    # wrap the spikegen in this compartment
                    spikegen = moose.SpikeGen(prePath+'/'+synName+'_spikegen')
                else:
                    spikegen = moose.SpikeGen(prePath+'/IaF_spikegen')
                # connect the spikegen to the synapse note that you need to use
                # Synapse (auto-created) under SynChan to get/set weights ,
                # addSpike-s etc. can get the Synapse element by
                # moose.Synapse(syn.path+'/synapse') or syn.synapse Synpase is
                # an array element, first add to it, to addSpike-s, get/set
                # weights, etc.
                syn.numSynapses += 1
                m = self.connectSynapse(spikegen, syn)
            else:
                # if connected to a file, create a timetable,
                # put in a field specifying the connected filenumbers to this segment,
                # and leave it for simulation-time connection
                ## prePath is 'file[+<glomnum>]_<filenum1>[_<filenum2>...]' i.e. glomnum could be present
                filesplit = prePath.split('+')
                if len(filesplit) == 2:
                    glomsplit = filesplit[1].split('_', 1)
                    glomstr = '_'+glomsplit[0]
                    filenums = glomsplit[1]
                else:
                    glomstr = ''
                    filenums = prePath.split('_', 1)[1]
                tt_path = postcomp.path+'/'+synNameFull+glomstr+'_tt'
                if not moose.exists(tt_path):
                    # if timetable for this synapse doesn't exist in this
                    # compartment, create it, and add the field 'fileNumbers'
                    tt = moose.TimeTable(tt_path)
                    tt.addField('fileNumbers')
                    tt.setField('fileNumbers',filenums)
                    # Be careful to connect the timetable only once while
                    # creating it as below: note that you need to use Synapse
                    # (auto-created) under SynChan to get/set weights ,
                    # addSpike-s etc. can get the Synapse element by
                    # moose.Synapse(syn.path+'/synapse') or syn.synapse Synpase
                    # is an array element, first add to it, to addSpike-s,
                    # get/set weights, etc.
                    syn.numSynapses += 1
                    # NOTE(review): `spikegen` is never assigned in this file
                    # branch, so this call would raise NameError if reached;
                    # also connectSynapse asserts a SynChan but receives
                    # syn.synapse here. Confirm intended wiring.
                    m = self.connectSynapse(spikegen, syn.synapse)
                else:
                    # if it exists, append file number to the field 'fileNumbers'
                    tt = moose.TimeTable(tt_path)
                    # append filenumbers from
                    # 'file[+<glomnum>]_<filenumber1>[_<filenumber2>...]'
                    filenums = moose_methods.moosePath(tt.getField('fileNumbers')
                            , filenums)
                    tt.setField('fileNumbers', filenums)
            # syn.Gbar remains the same, but we play with the weight which is a
            # factor to Gbar The delay and weight can be set only after
            # connecting a spike event generator. delay and weight are arrays:
            # multiple event messages can be connected to a single synapse first
            # argument below is the array index, we connect to the latest
            # synapse created above But KinSynChan ignores weight of the
            # synapse, so set the Gbar for it
            if libsyn.className == 'KinSynChan':
                syn.Gbar = weight*syn.Gbar
            else:
                # note that you need to use Synapse (auto-created) under SynChan
                # to get/set weights , addSpike-s etc. can get the Synpase
                # element by moose.Synapse(syn.path+'/synapse') or syn.synapse
                syn.synapse[-1].weight = weight
                syn.synapse[-1].delay = delay # seconds
                #print 'len = ',len(syn.synapse)
                #for i,syn_syn in enumerate(syn.synapse):
                #    print i,'th weight =',syn_syn.weight,'\n'
    def makeNewSynapse(self, synName, postcomp, synNameFull):
        '''Create a new synapse instance named `synNameFull` onto compartment
        `postcomp`.

        The SynChan prototype `synName` is loaded from XML into the library
        if absent, (optionally) copied under postcomp, configured with
        default parameters and wired to the compartment -- via its mgblock
        child for NMDA-style channels, directly otherwise.

        NOTE(review): this function returns None; callers binding its result
        (synObj) never use it.
        '''
        synPath = "%s/%s" % (self.libraryPath, synName)
        utils.dump("SYNAPSE"
                , "Creating {} with path {} onto compartment {}".format(
                    synName
                    , synPath
                    , postcomp.path
                    )
                )
        # if channel does not exist in library load it from xml file
        if not moose.exists(synPath):
            utils.dump("SYNAPSE"
                    , "Synaptic Channel {} does not exists. {}".format(
                        synPath, "Loading is from XML file"
                        )
                    )
            cmlR = ChannelML.ChannelML(self.nml_params)
            cmlR.readChannelMLFromFile(synName+'.xml')
        # deep copies the library synapse to an instance under postcomp named as
        # <arg3>
        # NOTE: attribute name 'disbleCopyingOfObject' is spelt this way in
        # the config module.
        if config.disbleCopyingOfObject:
            utils.dump("WARN"
                    , "Copying existing SynChan ({}) to {}".format(
                        synPath
                        , postcomp
                        )
                    )
            synid = moose.copy(moose.Neutral(synPath), postcomp, synNameFull)
        else:
            synid = synPath
        syn = moose.SynChan(synid)
        syn = self.configureSynChan(syn, synParams={})
        childmgblock = utils.get_child_Mstring(syn,'mgblock')
        # connect the post compartment to the synapse
        # If NMDA synapse based on mgblock, connect to mgblock
        if childmgblock.value == 'True':
            mgblock = moose.Mg_block(syn.path+'/mgblock')
            self.connectWrapper(postcomp, "channel", mgblock, "channel")
        # if SynChan or even NMDAChan, connect normally
        else:
            self.connectWrapper(postcomp,"channel", syn, "channel")
def configureSynChan(self, synObj, synParams={}):
'''Configure synapse. If no parameters are given then use the default
values.
'''
assert(isinstance(synObj, moose.SynChan))
utils.dump("SYNAPSE"
, "Configuring SynChan"
)
synObj.tau1 = synParams.get('tau1', 5.0)
synObj.tau2 = synParams.get('tau2', 1.0)
synObj.Gk = synParams.get('Gk', 1.0)
synObj.Ek = synParams.get('Ek', 0.0)
synObj.synapse.num = synParams.get('synapse_num', 1)
synObj.synapse.delay = synParams.get('delay', 1.0)
synObj.synapse.weight = synParams.get('weight', 1.0)
return synObj
|
dilawar/moose-full
|
moose-core/python/libmumbl/nml_parser/NetworkML.py
|
Python
|
gpl-2.0
| 39,154
|
[
"MOOSE",
"NEURON"
] |
53bb578cfb05c2be42c55c20af79e044b311dae7f61e1c7822fe7aa10ce8396f
|
#!/usr/bin/env python
"""
Post-process the individual cell output files resulting from running the
OpenIFS single column model over a grid of cells.
The program takes a single input which is a configuration file in INI
format. This is the same configuration file used to run the model that
generated the column output files.
If you wish to exclude some variables from the processing, you may place
a file named 'dropvars.txt' in the current directory and write the names
of the variables you wish to exclude in there, one per line.
The program can use shared-memory parallelism, but has no support for MPI.
"""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime
from functools import partial
import logging
from multiprocessing import Pool
import os.path
import shutil
import sys
import numpy as np
from scmtiles import __version__ as scmtiles_version
from scmtiles.config import SCMTilesConfig
from scmtiles.exceptions import ConfigurationError
from scmtiles.grid_manager import GridManager
import xarray as xr
#: Version number of the post-processor.
__version__ = '1.0.alpha1'
#: Names of model output files.
SCM_OUT_FILES = ('diagvar.nc', 'diagvar2.nc', 'progvar.nc')
class Error(Exception):
    """Base exception for all anticipated failures in this program."""
def load_coordinate_templates(config):
    """
    Loads the x and y grid coordinates from the SCM input file.
    **Arguments:**
    * config
        An `~scmtiles.config.SCMTilesConfig` instance describing the run to
        load coordinates from.
    **Returns:**
    * coord_dict
        A mapping from coordinate name to xarray Coordinate objects.
    """
    filename = config.input_file_pattern.format(time=config.start_time)
    filepath = os.path.join(config.input_directory, filename)
    try:
        # Use a context manager so the dataset's file handle is released
        # even when coordinate extraction fails part-way through (the old
        # code only closed the file on the success path).
        with xr.open_dataset(filepath) as ds:
            x_coord = ds[config.xname]
            y_coord = ds[config.yname]
            # Force the data into memory before the file is closed.
            x_coord.load()
            y_coord.load()
    except (OSError, RuntimeError) as e:
        # Catch the same classes handled in pp_cell(); newer xarray raises
        # OSError (not RuntimeError) for missing/unreadable files.
        msg = 'Failed to open input file "{}": {!s}'
        raise Error(msg.format(filepath, e))
    except ValueError:
        msg = ("Failed to extract template coordinates, check grid "
               "dimensions in configuration match those in the files")
        raise Error(msg)
    return {config.xname: x_coord, config.yname: y_coord}
def pp_cell(cell, timestamp, coordinate_templates, drop_list, config):
    """
    Post-process an individual cell.
    **Arguments:**
    * cell
        A `~scmtiles.grid_manager.Cell` instance identifying the cell.
    * timestamp
        A string timestamp used as part of the directory name for the cell
        output files.
    * coordinate_templates
        A dictionary mapping coordinate names to xarray coordinate objects, as
        returned from `load_coordinate_templates`. This is used to lookup the
        latitude and longitude of the cell from its indices.
    * drop_list
        A list of variable names to exclude when reading the cell files.
    * config
        A `~scmtiles.config.SCMTilesConfig` instance describing the run being
        post-processed.
    **Returns:**
    * (cell_ds, dirpath)
        A 2-tuple containing the cell data in an `xarray.Dataset` and the full
        path to the directory the cell data were loaded from.
    """
    cell_id = 'y{:04d}x{:04d}'.format(cell.y_global, cell.x_global)
    dirname = '{}.{}'.format(timestamp, cell_id)
    dirpath = os.path.join(config.output_directory, dirname)
    filepaths = [os.path.join(dirpath, filename) for filename in SCM_OUT_FILES]
    # Load the cell dataset from file into memory, then close the input
    # file to free the file handle.
    try:
        # Work-around for problem using open_mfdataset inside a
        # multiprocessing pool where the load just waits indefinitely.
        ds_list = [xr.open_dataset(fp, drop_variables=drop_list)
                   for fp in filepaths]
        cell_ds = xr.auto_combine(ds_list)
        cell_ds.load()
        cell_ds.close()
        for ds in ds_list:
            ds.close()
    except (OSError, RuntimeError):
        msg = 'The input files "{!s}" cannot be read, do they exist?'
        raise Error(msg.format(filepaths))
    # Add scalar latitude and longitude coordinates and return the
    # modified cell dataset:
    x_value = coordinate_templates[config.xname][cell.x_global]
    y_value = coordinate_templates[config.yname][cell.y_global]
    cell_ds.coords.update({config.yname: y_value, config.xname: x_value})
    return cell_ds, dirpath
def pp_tile(config, timestamp, coordinate_templates, drop_list, tile):
    """
    Post-process a rectangular tile of cells.
    **Arguments:**
    * config
        A `~scmtiles.config.SCMTilesConfig` instance describing the run being
        post-processed.
    * timestamp
        A string timestamp used as part of the filename for the cell output
        files.
    * coordinate_templates
        A dictionary mapping coordinate names to xarray coordinate objects, as
        returned from `load_coordinate_templates`. This is used to lookup the
        latitude and longitude of the cells from their indices.
    * drop_list
        A list of variable names to exclude when reading the cell files.
    * tile
        A `~scmtiles.grid_manager.RectangularTile` instance describing the tile
        to process.
    **Returns:**
    * (tile_ds, filepaths)
        An `xarray.Dataset` representing the tile, and a list of paths to the
        files that were loaded to form the tile.
    """
    # Group the loaded cell datasets by their global y index so each row can
    # be concatenated along x first.
    grid_rows = OrderedDict()
    filepaths = []
    for cell in tile.cells():
        cell_ds, cell_filepath = pp_cell(cell, timestamp, coordinate_templates,
                                         drop_list, config)
        try:
            grid_rows[cell.y_global].append(cell_ds)
        except KeyError:
            grid_rows[cell.y_global] = [cell_ds]
        filepaths.append(cell_filepath)
    # Concatenate each row along x, then stack rows along y (skip the y
    # concatenation when the tile is a single row).
    for key, row in grid_rows.items():
        grid_rows[key] = xr.concat(row, dim=config.xname)
    if len(grid_rows) > 1:
        tile_ds = xr.concat(grid_rows.values(), dim=config.yname)
    else:
        tile_ds, = grid_rows.values()
    logger = logging.getLogger('PP')
    logger.info('processing of tile #{} completed'.format(tile.id))
    return tile_ds, filepaths
def post_process(config_file_path, num_processes, delete_cell_files=False):
    """
    Post-process an SCMTiles model run by combining individual cell output
    files into a single file for the whole grid.
    **Arguments:**
    * config_file_path
        The path to an `~scmtiles.config.SCMTilesConfig` file describing
        the run to be post-processed.
    * num_processes
        The number of processes to use to do the post-processing. You can
        choose any positive integer number, although it is advised to match
        against the resources you have available. One process per processor
        is a sensible choice, but depending on your I/O performance you may
        benefit from more or fewer processes than you have CPU cores available.
    **Keyword arguments:**
    * delete_cell_files
        If `True` the files containing data for the individual cells will be
        deleted once the whole grid file has been successfully written to
        disk. If `False` then the individual cell files will remain after
        processing. The default is `False` (no files deleted).
    """
    if num_processes < 1:
        raise Error('number of processes must be positive')
    try:
        config = SCMTilesConfig.from_file(config_file_path)
    except ConfigurationError as e:
        raise Error(e)
    # NOTE(review): the third GridManager argument repeats config.ysize --
    # confirm against the GridManager signature that this is intentional.
    tiles = GridManager(config.xsize,
                        config.ysize,
                        config.ysize).decompose_by_rows()
    # Compute static data:
    logger = logging.getLogger('PP')
    logger.info('loading coordinate templates')
    coordinate_templates = load_coordinate_templates(config)
    timestamp = config.start_time.strftime('%Y%m%d_%H%M%S')
    # Load a list of variables to drop, if one exists:
    try:
        with open('dropvars.txt', 'r') as f:
            drop_list = [var.strip() for var in f.readlines()]
    except IOError:
        # If the file doesn't exist or we can't read it then we don't care,
        # we just won't drop any variables in post-processing.
        drop_list = []
    process_pool = Pool(num_processes)
    logger.info('dispatching tiles to {} workers'.format(num_processes))
    results = process_pool.map(
        partial(pp_tile, config, timestamp, coordinate_templates, drop_list),
        tiles)
    # The final dataset is formed by concatenating all the tiles along the
    # y-grid axis.
    dataset = xr.concat(sorted([ds for ds, _ in results],
                               key=lambda ds: ds[config.yname].values.max()),
                        dim=config.yname)
    # Ensure the time dimension has CF compliant units:
    # NOTE: '%FT%T' strftime codes are POSIX shorthands and may not be
    # available on all platforms (e.g. Windows).
    start_time = config.start_time.strftime('%FT%T')
    base_time = np.datetime64('{}+0000'.format(start_time))  # UTC
    time_unit = 'seconds since {}'.format(start_time)
    dataset.coords['time'].values = dataset.coords['time'].values + base_time
    dataset.coords['time'].encoding = {'units': time_unit, 'dtype': 'int32'}
    # Set the encoding for each variable so we use compression and shuffle
    # when saving to netCDF format, level 1 compression with a shuffle filter
    # have been experimentally determined to be a balanced choice between file
    # size and performance:
    encoding = {'zlib': True, 'complevel': 1, 'shuffle': True}
    for var_name in dataset.variables.keys():
        dataset[var_name].encoding.update(encoding)
    # We want to serialize using a conventional coordinate order of time first,
    # level second, and grid dimensions last in the order latitude then
    # longitude. This ordering can be supplied to the dataset transpose method.
    transposed_coords = ('time',
                         'nlev',
                         'nlevp1',
                         'nlevs',
                         'norg',
                         'ntiles',
                         'ncextr',
                         config.yname,
                         config.xname)
    # Write the output to a netcdf file.
    output_filename = 'scm_out.{}.nc'.format(timestamp)
    try:
        # PermissionError is a subclass of OSError, so a single handler
        # covers permission and other filesystem failures alike.
        os.makedirs(config.archive_directory, exist_ok=True)
    except OSError:
        msg = 'cannot create archive directory: {!s}'
        raise Error(msg.format(config.archive_directory))
    output_filepath = os.path.join(config.archive_directory, output_filename)
    logger.info('writing combined output file: {!s}'.format(output_filepath))
    try:
        dataset.transpose(*transposed_coords).to_netcdf(output_filepath)
    except RuntimeError as e:
        # BUG FIX: the Error was previously constructed but never raised,
        # silently swallowing write failures.
        raise Error('failed write grid to disk: {!s}'.format(e))
    if delete_cell_files:
        logger.info('deleting individual column files')
        for dlist in (dl for _, dl in results):
            for dp in dlist:
                shutil.rmtree(dp)
def main(argv=None):
    """Command-line entry point for the post-processor.

    Parses the command line, configures logging and runs the
    post-processing driver.

    :param argv: Argument list to parse; defaults to ``sys.argv``.
    :return: Process exit status: 0 on success, 1 on error.
    """
    if argv is None:
        argv = sys.argv
    # Set up command line argument parsing.
    ap = ArgumentParser(description=__doc__)
    ap.add_argument('-n', '--num-processes', type=int, default=1,
                    help="Number of processes to use.")
    ap.add_argument('-d', '--delete', action='store_true', default=False,
                    help='delete input column files after processing')
    ap.add_argument('config_file_path', type=str,
                    help='path to the program configuration file')
    # Parse the given arguments, this will handle errors gracefully and print
    # a helpful message to the screen if an error occurs.
    argns = ap.parse_args(argv[1:])
    # Initialize the logging system:
    logger = logging.getLogger('PP')
    logger.setLevel(logging.DEBUG)
    log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setLevel(logging.DEBUG)
    # Fixed datefmt typo ('%Y-%m%d' -> '%Y-%m-%d') so log timestamps use a
    # standard hyphenated date.
    log_handler.setFormatter(logging.Formatter(
        '[%(asctime)s] (%(name)s) %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(log_handler)
    try:
        # Run the post-processor.
        logger.info('Running {} at version {}'.format(argv[0], __version__))
        logger.info('Backend scmtiles is version {}'.format(scmtiles_version))
        post_process(argns.config_file_path, argns.num_processes,
                     delete_cell_files=argns.delete)
    except Error as e:
        # Application errors are reported through the logger and mapped to a
        # non-zero exit status instead of a traceback.
        logger.error('{!s}'.format(e))
        return 1
    else:
        return 0
if __name__ == '__main__':
    # Propagate main()'s integer exit status to the shell.
    sys.exit(main())
|
aopp-pred/openifs-scmtiles
|
openifs_pp_main.py
|
Python
|
apache-2.0
| 13,073
|
[
"NetCDF"
] |
5c1a29abc0dc4db651b451e287800187bbe752e55cb07784a708d3769d5eb71f
|
from abc import ABCMeta, abstractmethod
import os
import subprocess
import collections
import psutil
import shlex
from pychemia import pcm_log
class CodeRun:
    """Superclass defining the operations for running external codes.

    Covers running a code either directly or asynchronously via a
    submission script on a cluster: preparing inputs, launching the
    executable (optionally under MPI) and collecting outputs.
    """

    # Python 2 style metaclass declaration; under Python 3 this attribute has
    # no effect, so the @abstractmethod markers below are not enforced.
    __metaclass__ = ABCMeta

    def __init__(self, executable, workdir='.', use_mpi=False):
        """
        :param executable: Name or path to the executable; if it is not an
                           existing path it is resolved via the $PATH at run
                           time.
        :param workdir: Path to a folder where input and output will be
                        located (Default: '.')
        :param use_mpi: True if code relies on MPI for execution
                        (Default: False)
        """
        # File objects and filenames used to redirect the standard streams.
        self.stdin_file = None
        self.stdout_file = None
        self.stderr_file = None
        self.stdin_filename = None
        self.stdout_filename = None
        self.stderr_filename = None
        self.input_path = None
        self.input = None
        self.executable = executable
        self.workdir = workdir
        self.use_mpi = use_mpi
        # Popen instance of the most recent run (set by run()).
        self.runner = None

    @abstractmethod
    def set_inputs(self):
        """Write all the input files and prepare the environment for execution.

        Must be implemented by child classes.

        :return: None
        """
        pass

    @abstractmethod
    def get_outputs(self):
        """Check the existence of output files and prepare their parsing.

        Must be implemented by child classes.

        :return: None
        """
        pass

    def run(self, num_threads=None, mpi_num_procs=None, nodefile=None, wait=True, verbose=False):
        """Run the executable and return a reference to the subprocess created.

        :param num_threads: int -> only OMP_NUM_THREADS is set; dict -> any of
                            OMP_NUM_THREADS, OPENBLAS_NUM_THREADS,
                            GOTO_NUM_THREADS and MKL_NUM_THREADS present in the
                            dict with a positive integer value is set; None ->
                            no variable is set.
        :param mpi_num_procs: Number of MPI processes; if None and the code
                              uses MPI, defaults to the number of physical
                              cores on the system.
        :param nodefile: Intended for host selection on a cluster.
                         NOTE(review): currently unused by this method.
        :param wait: NOTE(review): currently unused; the output-draining loop
                     below always waits for the process to terminate.
        :param verbose: Print extra information before and after the execution.
        :return: The subprocess.Popen instance of the run.
        """
        cwd = os.getcwd()
        pcm_log.debug("Current location=%s, workdir=%s" % (cwd, self.workdir))
        os.chdir(self.workdir)
        # Set threading-related variables on os.environ so they are inherited
        # by the child process.  (The previous implementation ran
        # 'export VAR=...' in a throw-away shell, which had no effect on this
        # process and raised TypeError for dict arguments because it formatted
        # the whole dict with %d.)
        if num_threads is not None:
            if isinstance(num_threads, int):
                os.environ['OMP_NUM_THREADS'] = str(num_threads)
            elif isinstance(num_threads, dict):
                for evar in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'GOTO_NUM_THREADS', 'MKL_NUM_THREADS']:
                    if evar in num_threads and isinstance(num_threads[evar], int) and num_threads[evar] > 0:
                        os.environ[evar] = str(num_threads[evar])
                        if verbose:
                            print('%s=%s' % (evar, os.environ[evar]))
        if self.stdin_filename is not None:
            self.stdin_file = open(self.stdin_filename, 'r')
        if self.stdout_filename is not None:
            self.stdout_file = open(self.stdout_filename, 'w')
        if self.stderr_filename is not None:
            self.stderr_file = open(self.stderr_filename, 'w')
        # Checking availability of executable: either an existing path or a
        # name resolvable through $PATH.
        if not os.path.exists(self.executable):
            try:
                which_bin = subprocess.check_output('which %s' % self.executable, shell=True)
            except subprocess.CalledProcessError:
                raise ValueError('ERROR: Executable %s could not be found as an absolute, relative or via the $PATH variable' %
                                 self.executable)
            exec_path = which_bin.decode('utf8').strip()
        else:
            exec_path = self.executable
        pcm_log.debug("Executable: %s " % exec_path)
        if self.use_mpi:
            # Select the number of MPI processes, defaulting to the number of
            # physical cores when not declared.
            nproc = 2
            if mpi_num_procs is None:
                nproc = psutil.cpu_count(logical=False)
            elif isinstance(mpi_num_procs, int):
                nproc = mpi_num_procs
            else:
                print("WARNING: Declared variable mpi_num_procs is not integer, defaulting to 2 process")
            command = 'mpirun -n %d %s' % (nproc, self.executable)
        else:
            command = "%s" % self.executable
        pcm_log.debug("Running: %s" % command)
        process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   stdin=self.stdin_file)
        # Drain stdout until the process terminates; this also acts as an
        # implicit wait for completion.
        while True:
            output = process.stdout.readline()
            if output == b'' and process.poll() is not None:
                break
            if output != b'' and process.poll() is not None:
                pcm_log.debug("process.poll() is %s" % process.poll())
                pcm_log.debug(output)
            if output and verbose:
                print(output.decode(), end='')
        self.runner = process
        os.chdir(cwd)
        return process
class CodeInput(collections.abc.MutableMapping):
    """Abstract mapping of input variables for an external code.

    Variables live in ``self.variables``.  A flat input maps variable names
    to values; a hierarchical input (``is_hierarchical`` True) maps section
    names to dictionaries of variables, and the mapping operations then take
    ``(section, varname)`` tuples as keys.
    """

    # Python 2 style metaclass declaration kept for backward compatibility;
    # ABCMeta is already supplied through collections.abc.MutableMapping.
    __metaclass__ = ABCMeta

    def __init__(self):
        # Path of the file this input is read from / written to.
        self.input_file = None
        # Storage for the input variables (flat or hierarchical).
        self.variables = {}

    def __contains__(self, x):
        if self.is_hierarchical:
            return x[1] in self.variables[x[0]]
        return x in self.variables

    def __delitem__(self, x):
        if self.is_hierarchical:
            return self.variables[x[0]].__delitem__(x[1])
        return self.variables.__delitem__(x)

    def __setitem__(self, x, value):
        if self.is_hierarchical:
            return self.variables[x[0]].__setitem__(x[1], value)
        return self.variables.__setitem__(x, value)

    def __getitem__(self, x):
        if self.is_hierarchical:
            return self.variables[x[0]].__getitem__(x[1])
        return self.variables.__getitem__(x)

    def __iter__(self):
        return self.variables.__iter__()

    def __len__(self):
        return self.variables.__len__()

    @abstractmethod
    def read(self):
        """Parse the input file and populate ``self.variables``."""
        pass

    @abstractmethod
    def __str__(self):
        """Render the variables in the code's input-file format."""
        pass

    def write(self, filename=None):
        """Write this input object into a text file the code can use as input.

        :param filename: Destination path; defaults to ``self.input_file``.
        :raises ValueError: If no filename is given and ``self.input_file``
                            is not set.
        """
        if filename is None:
            if self.input_file is None:
                raise ValueError("Not filename indicated")
            filename = self.input_file
        # Context manager guarantees the file is closed even if writing fails
        # (the previous implementation leaked the handle on error).
        with open(filename, 'w') as wf:
            wf.write(self.__str__())

    def has_variable(self, varname, section=None):
        """Return True if the variable exists (within *section* if hierarchical)."""
        if self.is_hierarchical:
            if section is None:
                raise ValueError('ERROR: Input variables are hierachical and not section was declared')
            return section in self.variables and varname in self.variables[section]
        return varname in self.variables

    def get_variable(self, varname, section=None):
        """Return the value of the variable, or None when it is not present."""
        if self.is_hierarchical:
            if section is None:
                raise ValueError('ERROR: Input variables are hierachical and not section was declared')
            if self.has_variable(varname, section=section):
                return self.variables[section][varname]
            return None
        if self.has_variable(varname):
            return self.variables[varname]
        return None

    def set_variable(self, varname, value, section=None):
        """Set the value of a variable (within *section* if hierarchical)."""
        if self.is_hierarchical:
            if section is None:
                raise ValueError('ERROR: Input variables are hierarchical and not section was declared')
            self.variables[section][varname] = value
        else:
            self.variables[varname] = value

    @property
    def get_number_variables(self):
        """Number of variables, or a per-section count dict if hierarchical."""
        if self.is_hierarchical:
            return dict((sec, len(self.variables[sec])) for sec in self.variables)
        return len(self.variables)

    @property
    def is_hierarchical(self):
        """True when variables are grouped into sections (flat by default)."""
        return False
class CodeOutput(collections.abc.Mapping):
    """Read-only mapping over the values parsed from a code's output files."""

    # Python 2 style metaclass declaration kept for backward compatibility;
    # ABCMeta is already supplied through collections.abc.Mapping.
    __metaclass__ = ABCMeta

    def __init__(self):
        # Parsed output quantities, keyed by name.
        self.output_values = {}

    @abstractmethod
    def read(self):
        """Parse the output files and fill ``self.output_values``."""
        pass

    # True means that the run is just complete
    @property
    @abstractmethod
    def is_finished(self):
        pass

    @property
    def is_loaded(self):
        """True once ``read`` has populated any output values."""
        return len(self.output_values) > 0

    def __contains__(self, x):
        return x in self.output_values

    def __getitem__(self, x):
        return self.output_values[x]

    def __iter__(self):
        return iter(self.output_values)

    def __len__(self):
        return len(self.output_values)
|
MaterialsDiscovery/PyChemia
|
pychemia/code/codes.py
|
Python
|
mit
| 10,796
|
[
"ABINIT"
] |
ef97b60c7cadffc96229577b0954f048180177fac14932283b9ba55ee0aae343
|
""" DISET request handler base class for the TransformationDB.
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.TransformationSystem.DB.TransformationDB import TransformationDB
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from types import StringType, ListType, DictType, IntType, LongType, StringTypes, TupleType
transTypes = list( StringTypes ) + [IntType, LongType]
class TransformationManagerHandlerBase( RequestHandler ):
def _parseRes( self, res ):
  """ Pass *res* through unchanged, logging its message first when it is an S_ERROR. """
  if res['OK']:
    return res
  gLogger.error( 'TransformationManager failure', res['Message'] )
  return res
def setDatabase( self, oDatabase ):
  """ Install the TransformationDB instance shared by all handler methods
      (stored in a module-level global).
  """
  global database
  database = oDatabase
types_getCounters = [StringType, ListType, DictType]

def export_getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None ):
  """ Count the rows of *table* grouped by the attributes in *attrList*,
      restricted by *condDict* and the optional time window.
  """
  return self._parseRes( database.getCounters( table, attrList, condDict, older = older,
                                               newer = newer, timeStamp = timeStamp ) )
####################################################################
#
# These are the methods to manipulate the transformations table
#
types_addTransformation = [ StringType, StringType, StringType, StringType, StringType, StringType, StringType]

def export_addTransformation( self, transName, description, longDescription, transType, plugin, agentType, fileMask,
                              transformationGroup = 'General',
                              groupSize = 1,
                              inheritedFrom = 0,
                              body = '',
                              maxTasks = 0,
                              eventsPerTask = 0,
                              addFiles = True ):
  """ Create a new transformation record; the calling client's DN and group are
      recorded as the author.  All arguments are forwarded to
      TransformationDB.addTransformation.
  """
  # authorDN = self._clientTransport.peerCredentials['DN']
  # authorGroup = self._clientTransport.peerCredentials['group']
  credDict = self.getRemoteCredentials()
  authorDN = credDict[ 'DN' ]
  authorGroup = credDict[ 'group' ]
  res = database.addTransformation( transName, description, longDescription, authorDN, authorGroup, transType, plugin,
                                    agentType, fileMask,
                                    transformationGroup = transformationGroup,
                                    groupSize = groupSize,
                                    inheritedFrom = inheritedFrom,
                                    body = body,
                                    maxTasks = maxTasks,
                                    eventsPerTask = eventsPerTask,
                                    addFiles = addFiles )
  # Log only on success; failures are logged by _parseRes.
  if res['OK']:
    gLogger.info( "Added transformation %d" % res['Value'] )
  return self._parseRes( res )
types_deleteTransformation = [transTypes]

def export_deleteTransformation( self, transName ):
  """ Delete the named transformation; the caller's DN is recorded as the author. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.deleteTransformation( transName, author = authorDN ) )
types_cleanTransformation = [transTypes]

def export_cleanTransformation( self, transName ):
  """ Clean the named transformation; the caller's DN is recorded as the author. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.cleanTransformation( transName, author = authorDN ) )
types_setTransformationParameter = [transTypes, StringTypes]

def export_setTransformationParameter( self, transName, paramName, paramValue ):
  """ Set (or add) one parameter of a transformation, recording the caller's DN. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.setTransformationParameter( transName, paramName, paramValue,
                                                              author = authorDN ) )
types_deleteTransformationParameter = [transTypes, StringTypes]

def export_deleteTransformationParameter( self, transName, paramName ):
  """ Remove a single additional parameter from a transformation. """
  return self._parseRes( database.deleteTransformationParameter( transName, paramName ) )
types_getTransformations = []

def export_getTransformations( self, condDict = None, older = None, newer = None, timeStamp = 'CreationDate',
                               orderAttribute = None, limit = None, extraParams = False, offset = None ):
  """ Return the transformations matching the selection.

      :param condDict: selection conditions (default: no selection).  The default
             is None rather than a mutable {} literal to avoid the shared
             mutable-default pitfall; None is mapped to {} here.
  """
  if condDict is None:
    condDict = {}
  res = database.getTransformations( condDict = condDict,
                                     older = older,
                                     newer = newer,
                                     timeStamp = timeStamp,
                                     orderAttribute = orderAttribute,
                                     limit = limit,
                                     extraParams = extraParams,
                                     offset = offset )
  return self._parseRes( res )
types_getTransformation = [transTypes]

def export_getTransformation( self, transName, extraParams = False ):
  """ Return the record of one transformation, optionally with its extra parameters. """
  return self._parseRes( database.getTransformation( transName, extraParams = extraParams ) )
types_getTransformationParameters = [transTypes, list( StringTypes ) + [ListType, TupleType]]

def export_getTransformationParameters( self, transName, parameters ):
  """ Return the value of the requested parameter(s) of a transformation. """
  return self._parseRes( database.getTransformationParameters( transName, parameters ) )
types_getTransformationWithStatus = [list( StringTypes ) + [ListType, TupleType]]

def export_getTransformationWithStatus( self, status ):
  """ Return the transformations currently in the given status(es). """
  return self._parseRes( database.getTransformationWithStatus( status ) )
####################################################################
#
# These are the methods to manipulate the TransformationFiles tables
#
types_addFilesToTransformation = [transTypes, [ListType, TupleType]]

def export_addFilesToTransformation( self, transName, lfns ):
  """ Attach the given LFNs to a transformation. """
  return self._parseRes( database.addFilesToTransformation( transName, lfns ) )
types_addTaskForTransformation = [transTypes]

def export_addTaskForTransformation( self, transName, lfns = None, se = 'Unknown' ):
  """ Create a task for a transformation over the given LFNs at a storage element.

      :param lfns: list of LFNs (default: empty).  The default is None rather than
             a mutable [] literal to avoid the shared mutable-default pitfall.
  """
  if lfns is None:
    lfns = []
  res = database.addTaskForTransformation( transName, lfns = lfns, se = se )
  return self._parseRes( res )
types_setFileStatusForTransformation = [transTypes, [StringType, DictType]]

def export_setFileStatusForTransformation( self, transName, dictOfNewFilesStatus, lfns = None, force = False ):
  """ Sets the file status for the transformation.

      The dictOfNewFilesStatus is a dictionary with the form:
      {12345: 'StatusA', 6789: 'StatusB', ... }

      :param lfns: only used for the backward-compatible case where
             dictOfNewFilesStatus is a plain status string applied to these LFNs.
             The default is None (mapped to []) rather than a mutable [] literal.
      :param force: accepted for interface compatibility.
             NOTE(review): *force* is not forwarded to the database call — confirm
             this is intended.
  """
  if lfns is None:
    lfns = []
  # create dictionary in case newLFNsStatus is a string - for backward compatibility
  if isinstance( dictOfNewFilesStatus, basestring ):
    dictOfNewFilesStatus = dict( [( lfn, dictOfNewFilesStatus ) for lfn in lfns ] )
  res = database.getTransformationFiles( {'TransformationID':transName, 'LFN': dictOfNewFilesStatus.keys()} )
  if not res['OK']:
    return res
  if res['Value']:
    tsFiles = res['Value']
    # for convenience, makes a small dictionary out of the tsFiles, with the lfn as key
    tsFilesAsDict = {}
    for tsFile in tsFiles:
      tsFilesAsDict[tsFile['LFN']] = tsFile['FileID']
    # Re-key the requested statuses by FileID for the DB call below.
    newStatusForFileIDs = dict( [( tsFilesAsDict[lfn], dictOfNewFilesStatus[lfn] ) for lfn in dictOfNewFilesStatus.keys()] )
  else:
    newStatusForFileIDs = dictOfNewFilesStatus
  res = database._getConnectionTransID( False, transName )
  if not res['OK']:
    return res
  connection = res['Value']['Connection']
  transID = res['Value']['TransformationID']
  res = database.setFileStatusForTransformation( transID, newStatusForFileIDs, connection = connection )
  return self._parseRes( res )
types_getTransformationStats = [transTypes]

def export_getTransformationStats( self, transName ):
  """ Return the per-status file counts of a transformation. """
  return self._parseRes( database.getTransformationStats( transName ) )
types_getTransformationFilesCount = [transTypes, StringTypes]

def export_getTransformationFilesCount( self, transName, field, selection = None ):
  """ Count the files of a transformation grouped by *field*.

      :param selection: extra selection conditions (default: none).  The default
             is None rather than a mutable {} literal to avoid the shared
             mutable-default pitfall.
  """
  if selection is None:
    selection = {}
  res = database.getTransformationFilesCount( transName, field, selection = selection )
  return self._parseRes( res )
types_getTransformationFiles = []

def export_getTransformationFiles( self, condDict = None, older = None, newer = None, timeStamp = 'LastUpdate',
                                   orderAttribute = None, limit = None, offset = None ):
  """ Return the transformation files matching the selection.

      :param condDict: selection conditions (default: no selection).  The default
             is None rather than a mutable {} literal to avoid the shared
             mutable-default pitfall.
  """
  if condDict is None:
    condDict = {}
  res = database.getTransformationFiles( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                         orderAttribute = orderAttribute, limit = limit, offset = offset,
                                         connection = False )
  return self._parseRes( res )
####################################################################
#
# These are the methods to manipulate the TransformationTasks table
#
types_getTransformationTasks = []

def export_getTransformationTasks( self, condDict = None, older = None, newer = None, timeStamp = 'CreationTime',
                                   orderAttribute = None, limit = None, inputVector = False, offset = None ):
  """ Return the transformation tasks matching the selection.

      :param condDict: selection conditions (default: no selection).  The default
             is None rather than a mutable {} literal to avoid the shared
             mutable-default pitfall.
  """
  if condDict is None:
    condDict = {}
  res = database.getTransformationTasks( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                         orderAttribute = orderAttribute, limit = limit, inputVector = inputVector,
                                         offset = offset )
  return self._parseRes( res )
types_setTaskStatus = [transTypes, [ListType, IntType, LongType], StringTypes]

def export_setTaskStatus( self, transName, taskID, status ):
  """ Set the status of one (or several) task(s) of a transformation. """
  return self._parseRes( database.setTaskStatus( transName, taskID, status ) )
types_setTaskStatusAndWmsID = [ transTypes, [LongType, IntType], StringType, StringType]

def export_setTaskStatusAndWmsID( self, transName, taskID, status, taskWmsID ):
  """ Set both the status and the WMS job ID of a transformation task. """
  return self._parseRes( database.setTaskStatusAndWmsID( transName, taskID, status, taskWmsID ) )
types_getTransformationTaskStats = [transTypes]

def export_getTransformationTaskStats( self, transName ):
  """ Return the per-status task counts of a transformation. """
  return self._parseRes( database.getTransformationTaskStats( transName ) )
types_deleteTasks = [transTypes, [LongType, IntType], [LongType, IntType]]

def export_deleteTasks( self, transName, taskMin, taskMax ):
  """ Delete the tasks of a transformation in the given inclusive ID range,
      recording the caller's DN as the author.
  """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.deleteTasks( transName, taskMin, taskMax, author = authorDN ) )
types_extendTransformation = [transTypes, [LongType, IntType]]

def export_extendTransformation( self, transName, nTasks ):
  """ Add *nTasks* new tasks to a transformation, recording the caller's DN. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.extendTransformation( transName, nTasks, author = authorDN ) )
types_getTasksToSubmit = [transTypes, [LongType, IntType]]

def export_getTasksToSubmit( self, transName, numTasks, site = '' ):
  """ Get information necessary for submission for a given number of tasks for a given transformation.

      Only tasks in status 'Created' are considered; each one is reserved before
      being returned so no other client submits it, and a failed reservation
      aborts the whole call.
  """
  res = database.getTransformation( transName )
  if not res['OK']:
    return self._parseRes( res )
  transDict = res['Value']
  submitDict = {}
  res = database.getTasksForSubmission( transName, numTasks = numTasks, site = site, statusList = ['Created'] )
  if not res['OK']:
    return self._parseRes( res )
  tasksDict = res['Value']
  for taskID, taskDict in tasksDict.items():
    # Reserve the task before handing it out for submission.
    res = database.reserveTask( transName, long( taskID ) )
    if not res['OK']:
      return self._parseRes( res )
    else:
      submitDict[taskID] = taskDict
  transDict['JobDictionary'] = submitDict
  return S_OK( transDict )
####################################################################
#
# These are the methods for TransformationInputDataQuery table
#
types_createTransformationInputDataQuery = [ [LongType, IntType, StringType], DictType ]

def export_createTransformationInputDataQuery( self, transName, queryDict ):
  """ Attach an input-data query to a transformation, recording the caller's DN. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.createTransformationInputDataQuery( transName, queryDict,
                                                                      author = authorDN ) )
types_deleteTransformationInputDataQuery = [ [LongType, IntType, StringType] ]

def export_deleteTransformationInputDataQuery( self, transName ):
  """ Remove the input-data query of a transformation, recording the caller's DN. """
  authorDN = self.getRemoteCredentials()[ 'DN' ]
  return self._parseRes( database.deleteTransformationInputDataQuery( transName, author = authorDN ) )
types_getTransformationInputDataQuery = [ [LongType, IntType, StringType] ]

def export_getTransformationInputDataQuery( self, transName ):
  """ Return the input-data query attached to a transformation. """
  return self._parseRes( database.getTransformationInputDataQuery( transName ) )
####################################################################
#
# These are the methods for transformation logging manipulation
#
types_getTransformationLogging = [transTypes]

def export_getTransformationLogging( self, transName ):
  """ Return the logging records of a transformation. """
  return self._parseRes( database.getTransformationLogging( transName ) )
####################################################################
#
# These are the methods for transformation additional parameters
#
types_getAdditionalParameters = [transTypes]

def export_getAdditionalParameters( self, transName ):
  """ Return the additional (non-standard) parameters of a transformation. """
  return self._parseRes( database.getAdditionalParameters( transName ) )
####################################################################
#
# These are the methods for file manipulation
#
types_getFileSummary = [ListType]

def export_getFileSummary( self, lfns ):
  """ Return the transformation summary of the given LFNs. """
  return self._parseRes( database.getFileSummary( lfns ) )
types_addDirectory = [StringType]

def export_addDirectory( self, path, force = False ):
  """ Register the files found under *path*, optionally forcing registration. """
  return self._parseRes( database.addDirectory( path, force = force ) )
types_exists = [ListType]

def export_exists( self, lfns ):
  """ Check which of the given LFNs are known to the catalog. """
  return self._parseRes( database.exists( lfns ) )
types_addFile = [ [ ListType, DictType ] + list( StringTypes ) ]

def export_addFile( self, fileDicts, force = False ):
  """ Interface provides { LFN1 : { PFN1, SE1, ... }, LFN2 : { PFN2, SE2, ... } }
  """
  return self._parseRes( database.addFile( fileDicts, force = force ) )
types_removeFile = [[ListType,DictType]]

def export_removeFile( self, lfns ):
  """ Interface provides [ LFN1, LFN2, ... ]

      A dictionary keyed by LFN is also accepted; only its keys are used.
  """
  if isinstance( lfns, dict ):
    lfns = lfns.keys()
  return self._parseRes( database.removeFile( lfns ) )
####################################################################
#
# These are the methods used for web monitoring
#

# TODO Get rid of this (talk to Matvey)
types_getDistinctAttributeValues = [StringTypes, DictType]

def export_getDistinctAttributeValues( self, attribute, selectDict ):
  """ Return the distinct values taken by *attribute* in the Transformations table. """
  res = database.getTableDistinctAttributeValues( 'Transformations', [attribute], selectDict )
  if res['OK']:
    return S_OK( res['Value'][attribute] )
  return self._parseRes( res )
types_getTableDistinctAttributeValues = [StringTypes, ListType, DictType]

def export_getTableDistinctAttributeValues( self, table, attributes, selectDict ):
  """ Return the distinct values taken by *attributes* in the given table. """
  return self._parseRes( database.getTableDistinctAttributeValues( table, attributes, selectDict ) )
types_getTransformationStatusCounters = []

def export_getTransformationStatusCounters( self ):
  """ Return a {status: count} summary over all transformations. """
  res = database.getCounters( 'Transformations', ['Status'], {} )
  if not res['OK']:
    return self._parseRes( res )
  statDict = dict( ( attrDict['Status'], count ) for attrDict, count in res['Value'] )
  return S_OK( statDict )
types_getTransformationSummary = []

def export_getTransformationSummary( self ):
  """ Get the summary of the currently existing transformations.

      Each transformation record is augmented with its task statistics
      ('JobStats') and total file count ('NumberOfFiles', -1 when the file
      statistics cannot be obtained).  Transformations whose task statistics
      fail to load are skipped with a warning.
  """
  res = database.getTransformations()
  if not res['OK']:
    return self._parseRes( res )
  transList = res['Value']
  resultDict = {}
  for transDict in transList:
    transID = transDict['TransformationID']
    res = database.getTransformationTaskStats( transID )
    if not res['OK']:
      gLogger.warn( 'Failed to get job statistics for transformation %d' % transID )
      continue
    transDict['JobStats'] = res['Value']
    res = database.getTransformationStats( transID )
    if not res['OK']:
      # File statistics are optional; -1 flags them as unavailable.
      transDict['NumberOfFiles'] = -1
    else:
      transDict['NumberOfFiles'] = res['Value']['Total']
    resultDict[transID] = transDict
  return S_OK( resultDict )
types_getTabbedSummaryWeb = [StringTypes, DictType, DictType, ListType, IntType, IntType]

def export_getTabbedSummaryWeb( self, table, requestedTables, selectDict, sortList, startItem, maxItems ):
  """ Build the web-portal summary for *table* plus each related table listed in
      *requestedTables*, linked through their shared key columns.
  """
  # For each primary table: the related tables and the columns linking them.
  tableDestinations = { 'Transformations' : { 'TransformationFiles' : ['TransformationID'],
                                              'TransformationTasks' : ['TransformationID'] },
                        'TransformationFiles' : { 'Transformations' : ['TransformationID'],
                                                  'TransformationTasks' : ['TransformationID', 'TaskID'] },
                        'TransformationTasks' : { 'Transformations' : ['TransformationID'],
                                                  'TransformationFiles' : ['TransformationID', 'TaskID'] } }
  # Columns whose distinct values are offered as selections, per table.
  tableSelections = { 'Transformations' : ['TransformationID', 'AgentType', 'Type', 'TransformationGroup',
                                           'Plugin'],
                      'TransformationFiles' : ['TransformationID', 'TaskID', 'Status', 'UsedSE', 'TargetSE'],
                      'TransformationTasks' : ['TransformationID', 'TaskID', 'ExternalStatus', 'TargetSE'] }
  # Time-stamp column used for the FromDate/ToDate window, per table.
  tableTimeStamps = { 'Transformations' : 'CreationDate',
                      'TransformationFiles' : 'LastUpdate',
                      'TransformationTasks' : 'CreationTime' }
  # Column aggregated into the per-status counts, per table.
  tableStatusColumn = { 'Transformations' : 'Status',
                        'TransformationFiles' : 'Status',
                        'TransformationTasks' : 'ExternalStatus' }
  resDict = {}
  # Summary of the primary table first.
  res = self.__getTableSummaryWeb( table, selectDict, sortList, startItem, maxItems,
                                   selectColumns = tableSelections[table], timeStamp = tableTimeStamps[table],
                                   statusColumn = tableStatusColumn[table] )
  if not res['OK']:
    gLogger.error( "Failed to get Summary for table", "%s %s" % ( table, res['Message'] ) )
    return self._parseRes( res )
  resDict[table] = res['Value']
  # Derive the selection for each related table from the primary table's
  # linking-column selections.
  selections = res['Value']['Selections']
  tableSelection = {}
  for destination in tableDestinations[table].keys():
    tableSelection[destination] = {}
    for parameter in tableDestinations[table][destination]:
      tableSelection[destination][parameter] = selections.get( parameter, [] )
  # NOTE(review): this loop rebinds the 'table', 'sortList', 'startItem' and
  # 'maxItems' parameters while building the related-table summaries.
  for table, paramDict in requestedTables.items():
    sortList = paramDict.get( 'SortList', [] )
    startItem = paramDict.get( 'StartItem', 0 )
    maxItems = paramDict.get( 'MaxItems', 50 )
    res = self.__getTableSummaryWeb( table, tableSelection[table], sortList, startItem, maxItems,
                                     selectColumns = tableSelections[table], timeStamp = tableTimeStamps[table],
                                     statusColumn = tableStatusColumn[table] )
    if not res['OK']:
      gLogger.error( "Failed to get Summary for table", "%s %s" % ( table, res['Message'] ) )
      return self._parseRes( res )
    resDict[table] = res['Value']
  return S_OK( resDict )
types_getTransformationsSummaryWeb = [DictType, ListType, IntType, IntType]

def export_getTransformationsSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
  """ Paginated web-portal summary of the Transformations table. """
  return self.__getTableSummaryWeb( 'Transformations', selectDict, sortList, startItem, maxItems,
                                    selectColumns = ['TransformationID', 'AgentType', 'Type', 'Group', 'Plugin'],
                                    timeStamp = 'CreationDate', statusColumn = 'Status' )
types_getTransformationTasksSummaryWeb = [DictType, ListType, IntType, IntType]

def export_getTransformationTasksSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
  """ Paginated web-portal summary of the TransformationTasks table. """
  return self.__getTableSummaryWeb( 'TransformationTasks', selectDict, sortList, startItem, maxItems,
                                    selectColumns = ['TransformationID', 'ExternalStatus', 'TargetSE'],
                                    timeStamp = 'CreationTime', statusColumn = 'ExternalStatus' )
types_getTransformationFilesSummaryWeb = [DictType, ListType, IntType, IntType]

def export_getTransformationFilesSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
  """ Paginated web-portal summary of the TransformationFiles table. """
  return self.__getTableSummaryWeb( 'TransformationFiles', selectDict, sortList, startItem, maxItems,
                                    selectColumns = ['TransformationID', 'Status', 'UsedSE', 'TargetSE'],
                                    timeStamp = 'LastUpdate', statusColumn = 'Status' )
def __getTableSummaryWeb( self, table, selectDict, sortList, startItem, maxItems, selectColumns = None,
                          timeStamp = None, statusColumn = 'Status' ):
  """ Generic paginated summary of *table* in the web-portal format.

      :param selectDict: selection conditions; any FromDate/ToDate entries are
             removed and used as a time window on *timeStamp*.
      :param sortList: list of (column, direction) pairs; only the first is used.
      :param startItem: index of the first record of the page.
      :param maxItems: maximum number of records in the page.
      :param selectColumns: columns whose distinct values are returned as
             'Selections' (default: none).  The default is None rather than a
             mutable [] literal to avoid the shared mutable-default pitfall.
      :param statusColumn: column aggregated into the per-status 'Extras' counts.
  """
  if selectColumns is None:
    selectColumns = []
  fromDate = selectDict.get( 'FromDate', None )
  if fromDate:
    del selectDict['FromDate']
  toDate = selectDict.get( 'ToDate', None )
  if toDate:
    del selectDict['ToDate']
  # Sorting instructions. Only one for the moment.
  if sortList:
    orderAttribute = sortList[0][0] + ":" + sortList[0][1]
  else:
    orderAttribute = None
  # Resolve the database accessor for this table (database.get<Table>).
  fcn = None
  fcnName = "get%s" % table
  if hasattr( database, fcnName ) and callable( getattr( database, fcnName ) ):
    fcn = getattr( database, fcnName )
  if not fcn:
    return S_ERROR( "Unable to invoke database.%s, it isn't a member function of database" % fcnName )
  res = fcn( condDict = selectDict, older = toDate, newer = fromDate, timeStamp = timeStamp,
             orderAttribute = orderAttribute )
  if not res['OK']:
    return self._parseRes( res )
  # The full list of rows is contained here
  allRows = res['Records']
  # Prepare the standard structure now within the resultDict dictionary
  resultDict = {}
  # Create the total records entry
  resultDict['TotalRecords'] = len( allRows )
  # Create the ParameterNames entry
  resultDict['ParameterNames'] = res['ParameterNames']
  # Find which element in the tuple contains the requested status
  if statusColumn not in resultDict['ParameterNames']:
    return S_ERROR( "Provided status column not present" )
  statusColumnIndex = resultDict['ParameterNames'].index( statusColumn )
  # Get the rows which are within the selected window
  if resultDict['TotalRecords'] == 0:
    return S_OK( resultDict )
  ini = startItem
  last = ini + maxItems
  if ini >= resultDict['TotalRecords']:
    return S_ERROR( 'Item number out of range' )
  if last > resultDict['TotalRecords']:
    last = resultDict['TotalRecords']
  selectedRows = allRows[ini:last]
  resultDict['Records'] = selectedRows
  # Generate the per-status counts for the selected page
  statusDict = {}
  for row in selectedRows:
    status = row[statusColumnIndex]
    statusDict[status] = statusDict.setdefault( status, 0 ) + 1
  resultDict['Extras'] = statusDict
  # Obtain the distinct values of the selection parameters
  res = database.getTableDistinctAttributeValues( table, selectColumns, selectDict, older = toDate, newer = fromDate )
  # NOTE(review): zip( selectColumns, [] ) is always an empty list; presumably
  # intended as an empty fallback when the distinct-values query fails.
  distinctSelections = zip( selectColumns, [] )
  if res['OK']:
    distinctSelections = res['Value']
  resultDict['Selections'] = distinctSelections
  return S_OK( resultDict )
types_getTransformationSummaryWeb = [DictType, ListType, IntType, IntType]
def export_getTransformationSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
  """ Get the summary of the transformation information for a given page in the generic format.

      :param selectDict: selection criteria; the special keys 'CreationDate', 'FromDate'
                         and 'ToDate' are removed and used as the query time window
      :param sortList: list of ( field, direction ) pairs turned into 'field:direction'
                       order attributes for the DB query
      :param startItem: index of the first transformation of the requested page
      :param maxItems: maximum number of rows in the page
      :return: S_OK with a dict holding TotalRecords, ParameterNames, Records (one row
               per transformation, extended in place with job and file statistics) and
               Extras (a Status -> count summary of the page)
  """
  # Obtain the timing information from the selectDict
  last_update = selectDict.get( 'CreationDate', None )
  if last_update:
    del selectDict['CreationDate']
  fromDate = selectDict.get( 'FromDate', None )
  if fromDate:
    del selectDict['FromDate']
  # 'CreationDate' acts as a fallback lower bound when 'FromDate' is absent
  if not fromDate:
    fromDate = last_update
  toDate = selectDict.get( 'ToDate', None )
  if toDate:
    del selectDict['ToDate']
  # Sorting instructions. Only one for the moment.
  if sortList:
    orderAttribute = []
    for i in sortList:
      orderAttribute += [ i[0] + ":" + i[1] ]
  else:
    orderAttribute = None
  # Get the transformations that match the selection
  res = database.getTransformations( condDict = selectDict, older = toDate, newer = fromDate,
                                     orderAttribute = orderAttribute )
  if not res['OK']:
    return self._parseRes( res )
  # Prepare the standard structure now within the resultDict dictionary
  resultDict = {}
  trList = res['Records']
  # Create the total records entry
  nTrans = len( trList )
  resultDict['TotalRecords'] = nTrans
  # Create the ParameterNames entry
  # As this list is a reference to the list in the DB, we cannot extend it, therefore copy it
  resultDict['ParameterNames'] = list( res['ParameterNames'] )
  # Add the job states to the ParameterNames entry
  taskStateNames = ['TotalCreated', 'Created', 'Running', 'Submitted', 'Failed', 'Waiting', 'Done', 'Completed', 'Stalled',
                    'Killed', 'Staging', 'Checking', 'Rescheduled', 'Scheduled']
  resultDict['ParameterNames'] += ['Jobs_' + x for x in taskStateNames]
  # Add the file states to the ParameterNames entry
  fileStateNames = ['PercentProcessed', 'Processed', 'Unused', 'Assigned', 'Total', 'Problematic',
                    'ApplicationCrash', 'MaxReset']
  resultDict['ParameterNames'] += ['Files_' + x for x in fileStateNames]
  # Get the transformations which are within the selected window
  if nTrans == 0:
    return S_OK( resultDict )
  ini = startItem
  last = ini + maxItems
  if ini >= nTrans:
    return S_ERROR( 'Item number out of range' )
  if last > nTrans:
    last = nTrans
  transList = trList[ini:last]
  statusDict = {}
  extendableTranfs = Operations().getValue( 'Transformations/ExtendableTransfTypes',
                                            ['Simulation', 'MCsimulation'] )
  givenUpFileStatus = Operations().getValue( 'Transformations/GivenUpFileStatus',
                                             ['NotProcessed', 'Removed', 'MissingInFC', 'MissingLFC'] )
  problematicStatuses = Operations().getValue( 'Transformations/ProblematicStatuses',
                                               ['Problematic'] )
  # Add specific information for each selected transformation
  for trans in transList:
    transDict = dict( zip( resultDict['ParameterNames'], trans ) )
    # Update the status counters
    status = transDict['Status']
    statusDict[status] = statusDict.setdefault( status, 0 ) + 1
    # Get the statistics on the number of jobs for the transformation
    transID = transDict['TransformationID']
    res = database.getTransformationTaskStats( transID )
    taskDict = {}
    if res['OK'] and res['Value']:
      taskDict = res['Value']
    for state in taskStateNames:
      trans.append( taskDict.get( state, 0 ) )
    # Get the statistics for the number of files for the transformation
    fileDict = {}
    transType = transDict['Type']
    # NOTE(review): transType is lower-cased but the default extendable types
    # ('Simulation', 'MCsimulation') are mixed case, so with the default CS
    # values this branch can never match -- confirm the CS stores lower case.
    if transType.lower() in extendableTranfs:
      # Extendable (e.g. MC) transformations have no fixed file total
      fileDict['PercentProcessed'] = '-'
    else:
      res = database.getTransformationStats( transID )
      if res['OK']:
        fileDict = res['Value']
        total = fileDict['Total']
        # Files in a 'given up' state do not count towards the percentage
        for stat in givenUpFileStatus:
          total -= fileDict.get( stat, 0 )
        processed = fileDict.get( 'Processed', 0 )
        # One decimal of precision; 0. when there is nothing to process
        fileDict['PercentProcessed'] = "%.1f" % ( int( processed * 1000. / total ) / 10. ) if total else 0.
    problematic = 0
    for stat in problematicStatuses:
      problematic += fileDict.get( stat, 0 )
    fileDict ['Problematic'] = problematic
    for state in fileStateNames:
      trans.append( fileDict.get( state, 0 ) )
  resultDict['Records'] = transList
  resultDict['Extras'] = statusDict
  return S_OK( resultDict )
###########################################################################
# Module-level TransformationDB handle, created by the service initializer
# below; remains False until initializeTransformationManagerHandler() runs.
database = False
def initializeTransformationManagerHandler( serviceInfo ):
  """ Service initialization: instantiate the module-level TransformationDB. """
  global database
  database = TransformationDB( 'TransformationDB', 'Transformation/TransformationDB' )
  return S_OK()
class TransformationManagerHandler( TransformationManagerHandlerBase ):
  """ Concrete service handler: binds the module-level TransformationDB to the base class. """
  def __init__( self, *args, **kargs ):
    # Hand the module-level database (set at service initialization) to the
    # base handler before completing base-class construction.
    self.setDatabase( database )
    TransformationManagerHandlerBase.__init__( self, *args, **kargs )
|
vmendez/DIRAC
|
TransformationSystem/Service/TransformationManagerHandler.py
|
Python
|
gpl-3.0
| 29,811
|
[
"DIRAC"
] |
2c0240965e28df8b0061f07d30d84e013147a239fc997d474090832a0de5f3c8
|
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
import os
import pymatgen
import pymatgen.symmetry.analyzer
##########################
structure_id = 'mp-2176' #MgO 'mp-1265' # GaN 'mp-830' # Si 'mp-149' # 'mp-8884' # 'mp-1265'
##########################
def round_up_to_odd(f):
    """Map *f* to an odd mesh size.

    ceil(f) is nudged down by 0.5 before flooring to a multiple of two, so an
    odd ceiling maps to itself while an even ceiling maps to the odd number
    just below it (e.g. 8.0 -> 7, 8.1 -> 9).
    """
    shifted = np.ceil(f) - 0.5
    return int(shifted // 2 * 2 + 1)
def round_up_to_even(f):
    """Map *f* to an even mesh size: ceil(f) floored to a multiple of two
    (e.g. 7.5 -> 8, 9.0 -> 8, 6.1 -> 6)."""
    return int(2 * (np.ceil(f) // 2))
# Large k-meshes use odd number, else even
def get_kpoint_mesh_shape(kpoint_per_atom, structure, supercell=(1, 1, 1)):
    """Return an isotropic [n, n, n] k-point mesh shape.

    n is chosen so the mesh provides roughly ``kpoint_per_atom`` k-points per
    atom of the supercell: n = (kpoint_per_atom * num_atoms / prod(supercell))
    ** (1/3), rounded via round_up_to_odd for dense meshes (n > 8) and
    round_up_to_even otherwise.

    :param kpoint_per_atom: target number of k-points per atom
    :param structure: object exposing ``sites`` (list of atomic sites)
    :param supercell: (nx, ny, nz) multipliers of the reference cell
    :return: [n, n, n] list of ints
    """
    # The old version also computed the reciprocal-cell norms here, but the
    # result was never used -- dead code removed.
    num_atoms = len(structure.sites)
    # np.product is deprecated (removed in NumPy 2.0); np.prod is equivalent.
    supercell_size = np.prod(supercell)
    size = np.power(kpoint_per_atom * num_atoms / supercell_size, 1./3)
    if size > 8:
        size = round_up_to_odd(size)
    else:
        size = round_up_to_even(size)
    return [size, size, size]
def get_supercell_size(structure, max_atoms=100, crystal_system=None):
    """Grow an isotropy-respecting (nx, ny, nz) supercell of *structure*.

    Starting from (1, 1, 1), the shortest cell axis is repeatedly extended
    (together with its symmetry-equivalent axes) until the atom count would
    exceed *max_atoms*; whichever of the last two candidates is closer to
    *max_atoms* is returned.

    :param structure: object exposing ``cell`` (3x3 lattice) and ``sites``
    :param max_atoms: target atom count of the supercell
    :param crystal_system: name used to look up which axes must grow together
    :return: [nx, ny, nz] list of ints
    """
    def axis_symmetry(axis, crystal_system):
        # Axes sharing the same tag in the list below are symmetry-equivalent
        # and must be extended together (e.g. a == b in hexagonal cells).
        symmetry_dict = {'cubic': [1, 1, 1],
                         'hexagonal': [1, 1, 2],
                         'tetragonal': [1, 1, 2],
                         'monoclinic': [1, 1, 2],
                         'trigonal': [1, 1, 2]}
        try:
            return np.where(np.array(symmetry_dict[crystal_system]) == symmetry_dict[crystal_system][axis])[0]
        except KeyError:
            # If symmetry not defined in symmetry_dict or is None,
            # treat every axis as independent-but-grown-together.
            return np.array([0, 1, 2])
    cell = np.array(structure.cell)
    num_atoms = len(structure.sites)
    supercell_size = [1, 1, 1]
    while True:
        # Extend along the currently shortest supercell axis (and its twins)
        test_cell = np.dot(cell.T, np.diag(supercell_size)).T
        norm = np.linalg.norm(test_cell, axis=1)
        index = np.argmin(norm)
        supercell_size_test = list(supercell_size)
        for i in axis_symmetry(index, crystal_system):
            supercell_size_test[i] += 1
        anum_atoms_supercell = num_atoms * np.prod(supercell_size_test)
        if anum_atoms_supercell > max_atoms:
            # Pick whichever of the two candidates is closer to max_atoms
            # (atoms_plus recomputes anum_atoms_supercell).
            atoms_minus = num_atoms * np.prod(supercell_size)
            atoms_plus = num_atoms * np.prod(supercell_size_test)
            if max_atoms - atoms_minus < atoms_plus - max_atoms:
                return supercell_size
            else:
                return supercell_size_test
        else:
            supercell_size = supercell_size_test
def get_potential_labels(functional, symbol_list, ftype=None):
    """Pick one VASP pseudopotential label per element symbol.

    Labels are scraped from the filenames in $VASP_PSP_DIR/POT_GGA_PAW_<functional>.
    When *ftype* is given (e.g. 'h', 'sv'), a label ending in '_<ftype>' is
    preferred; otherwise the first candidate whose prefix matches the symbol
    is used. Order of the returned labels follows first appearance in
    *symbol_list*.
    """
    # np.unique sorts, so re-index to preserve first-appearance order
    _, index = np.unique(symbol_list, return_index=True)
    symbol_list_unique = np.array(symbol_list)[np.sort(index)]
    potential_labels =[]
    for symbol in symbol_list_unique:
        psp_dir = os.environ['VASP_PSP_DIR'] + '/POT_GGA_PAW_' + functional
        # f[7:-3] presumably strips a 7-char prefix (e.g. 'POTCAR.') and a
        # 3-char suffix (e.g. '.gz') from each filename -- TODO confirm the
        # naming scheme of the local pseudopotential archive.
        all_labels = [f[7:-3] for f in os.listdir(psp_dir) if os.path.isfile(os.path.join(psp_dir, f))]
        candidates = [ s for s in all_labels if symbol == s.split('_')[0]]
        if ftype is not None:
            final = [s for s in candidates if '_{}'.format(ftype) == s[-(len(ftype) + 1):]]
            if len(final) > 0:
                potential_labels.append(final[0])
                continue
        # NOTE(review): os.listdir order is arbitrary, so candidates[0] is
        # non-deterministic when several variants exist -- verify acceptable.
        potential_labels.append(candidates[0])
    return potential_labels
# --- Fetch structure and band structure from the Materials Project ---
rester = pymatgen.MPRester(os.environ['PMG_MAPI_KEY'])
pmg_structure = rester.get_structure_by_material_id(structure_id)
pmg_band = rester.get_bandstructure_by_material_id(structure_id)
material_name = pmg_structure.formula.replace('1','').replace(' ','')
# Standardize the cell and derive the conventional -> primitive transformation
spa = pymatgen.symmetry.analyzer.SpacegroupAnalyzer(pmg_structure)
conventional = spa.get_conventional_standard_structure()
primitive = spa.get_primitive_standard_structure()
print conventional
primitive_matrix = np.dot(np.linalg.inv(conventional.lattice.matrix), primitive.lattice.matrix)
primitive_matrix = np.round(primitive_matrix, decimals=6).tolist()
# Store the conventional cell as an AiiDA StructureData node
structure = StructureData(pymatgen=conventional).store()
print structure
crystal_system = spa.get_crystal_system()
print 'Crystal system: {}'.format(crystal_system)
# if crystal_system == 'hexagonal':
#     supercell = [[3, 0, 0],
#                  [0, 3, 0],
#                  [0, 0, 3]]
# else:
#     supercell = [[2, 0, 0],
#                  [0, 2, 0],
#                  [0, 0, 2]]
# Automatic supercell choice replacing the hard-coded block above
supercell_size = get_supercell_size(structure, crystal_system=crystal_system)
supercell = np.diag(supercell_size).tolist()
print ('Supercell shape: {}'.format(supercell_size))
# Criteria for INPUT
# --- Classify the material by its MP band gap ---
band_gap = pmg_band.get_band_gap()['energy']
if band_gap > 3.0:
    system = 'insulator'
elif band_gap > 0.01:
    system = 'semiconductor'
else:
    system = 'metal'
print 'system: {}'.format(system)
# Gaussian smearing (ISMEAR=0) for gapped systems, Methfessel-Paxton for metals
if system == 'insulator' or system == 'semiconductor':
    incar_dict = {
        'NELMIN' : 10,
        'ENCUT'  : 500,
        'NELM'   : 100,
        'ISMEAR' : 0,
        'SIGMA'  : 0.05,
        # 'GGA'  : 'PS'
        }
if system == 'metal':
    incar_dict = {
        'NELMIN' : 10,
        'ENCUT'  : 500,
        'NELM'   : 100,
        'ISMEAR' : 1,
        'SIGMA'  : 0.2,
        # 'GGA'  : 'PS'
        }
incar_dict.update({
    'NPAR': 4,
    'ALGO': 38
    })
pseudo_dict = {'functional': 'PBE',
               'symbols': get_potential_labels('PBE', conventional.symbol_set)}
print pseudo_dict
# Monkhorst-pack
if system == 'insulator' or system == 'semiconductor':
    # 100 Kpoints/atom
    kpoints_per_atom = 300
# 1000 kpoints/atom
if system == 'metal':
    kpoints_per_atom = 1200
# style computed here is only used by the commented explicit-mesh variant below
if crystal_system == 'hexagonal':
    style = 'Gamma'
else:
    style = 'Monkhorst'
kpoints_dict = {'style': 'Automatic',
                'kpoints_per_atom': kpoints_per_atom}
# kpoints_shape = get_kpoint_mesh_shape(kpoints_per_atom, structure)
# kpoints_dict = {'style': style,
#                 'points': kpoints_shape,
#                 'shift': [0.0, 0.0, 0.0]}
# kpoints_shape_supercell = get_kpoint_mesh_shape(kpoints_per_atom, structure, supercell=supercell_size)
# kpoints_dict_supercell = {'style': style,
#                           'points': kpoints_shape_supercell,
#                           'shift': [0.0, 0.0, 0.0]}
# print 'kpoints: {}'.format(kpoints)
# print 'kpoints (supercell): {}'.format(kpoints_shape_supercell)
# print 'shift {}'.format(kshift)
# --- Scheduler resources and phonopy settings ---
machine_dict = {
    'num_machines': 1,
    'parallel_env':'mpi*',
    'tot_num_mpiprocs': 16}
phonopy_parameters = {'supercell': supercell,
                      'primitive': primitive_matrix,
                      'distance': 0.01,
                      'symmetry_precision': 1e-5,
                      'mesh': [80, 80, 80]}
# Assemble the workflow input: force and optimization runs share VASP settings
wf_parameters = {
    'structure': structure,
    'phonopy_input': {'parameters': phonopy_parameters},
    'input_force': {'code': 'vasp541mpi@boston',
                    'parameters': incar_dict,
                    'resources': machine_dict,
                    'pseudo': pseudo_dict,
                    'kpoints': kpoints_dict},
    'input_optimize': {'code': 'vasp541mpi@boston',
                       'parameters': incar_dict,
                       'resources': machine_dict,
                       'pseudo': pseudo_dict,
                       'kpoints': kpoints_dict},
    }
#Submit workflow
WorkflowQHA = WorkflowFactory('wf_qha')
wf = WorkflowQHA(params=wf_parameters)
wf.label = material_name
wf.description = 'QHA {}'.format(structure.get_formula())
wf.start()
print ('pk: {}'.format(wf.pk))
|
abelcarreras/aiida_extensions
|
workflows/launcher/launch_qha_vasp_mp.py
|
Python
|
mit
| 7,549
|
[
"CRYSTAL",
"pymatgen"
] |
620424b5207320316fac0d71186bbc3d3cc7a1ed06012cdc12e03fd3642d77e4
|
import os, sys, inspect
import sqlite3 as sql
import pickle as pkl
from datetime import datetime
import re
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
import numpy.ma as ma
from numpy.random import uniform, seed
HOME = os.getenv('HOME','/home/meyerson')
BOUT_TOP = os.getenv('BOUT_TOP','/home/meyerson/BOUT')
SCRATCH = os.getenv('SCRATCH','/tmp')
PWD = os.getenv('PWD','/tmp')
WORK = os.getenv('WORK','/work/01523/meyerson/')
utc = datetime.utcnow()
#These are ancillary included files ... necessary
sys.path.append(HOME+'/local/python')
cmd_folder = WORK+'/BOUT_sims/blob_py'
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
print HOME
import numpy as np
#from blob_info import blob_info, Blob2D
from frame import Frame, FrameMovie
from blob_info import blob_info, Blob2D
#we can open 1 file very easily
from paraview.simple import *
from array import *
#define a function
def vtk_to_array(vtk_array):
    """Copy a VTK data array into a Python ``array.array``.

    Supports vtkDoubleArray (VTK type code 11 -> typecode 'd') and
    vtkIdTypeArray (code 12 -> 'l'). Any other VTK type now raises a clear
    ValueError instead of the NameError the old code produced when ``pt``
    was left unbound.
    """
    type_codes = {11: 'd',   # vtkDoubleArray
                  12: 'l'}   # vtkIdTypeArray
    at = vtk_array.GetDataType()
    if at not in type_codes:
        raise ValueError("unsupported VTK data type code: %s" % at)
    # this is slow. numpy.zeros would be faster.
    r = array(type_codes[at], [0]*vtk_array.GetSize())
    # VTK fills the buffer in place through the void pointer.
    vtk_array.ExportToVoidPointer(r)
    return r
#define a function
#start reading at pos i
def get_pos(i):
    """Read pvtu file number *i* and recover the regular (x, y) grid.

    Relies on module globals: the reader ``r``, ``base_dir`` and
    ``all_pvtus``. Returns ({'orig': (allx, ally), 'new': (x, y)}, dx, dy,
    Lx, Ly) where 'orig' holds the scattered point coordinates and 'new' the
    reconstructed regular axes.
    """
    print base_dir+all_pvtus[i] #this comes from some import from above
    r.FileName = base_dir+all_pvtus[i]
    r.UpdatePipeline()
    z = servermanager.Fetch(r)
    pos = z.GetPoints() #grabs positions in xyz
    xmin,xmax,ymin,ymax,zmin,zmax = np.round(pos.GetBounds())
    print xmin,xmax,ymin,ymax,zmin,zmax
    numPoints = pos.GetNumberOfPoints()
    allx = []
    ally = []
    # NOTE(review): the loop variable shadows the function parameter ``i``
    # (and ``z`` shadows the fetched object); harmless since neither is used
    # again afterwards, but worth renaming.
    for i in range(numPoints):
        x, y, z = pos.GetPoint(i)
        allx.append(x)
        ally.append(y)
    # Collapse the scattered coordinates onto their distinct sorted values
    flaty = np.array(sorted(set(ally)))
    flatx = np.array(sorted(set(allx)))
    x,y = flatx,flaty
    print (x-np.roll(x,1))[1:-2]
    #probably depends on regular grid
    dx = np.mean((x-np.roll(x,1))[1:-2])
    dy = np.mean((y-np.roll(y,1))[1:-2])
    # Snap dy so the extent is an integer number of spacings
    dy = (y.max() - y.min())/np.round((y.max() - y.min())/dy)
    #creates linspace in the matlab sense
    y = np.linspace(y.min(),y.max(),1.*np.round((y.max() - y.min())/dy))
    #returns dictionary
    return {'orig':(allx,ally),'new':(x,y)},dx,dy,xmax-xmin,ymax-ymin
def read_data(base_dir,all_files,pos,cached=False):
    """Grid the 'alpha_0' point data of every file in *all_files* onto the
    regular mesh described by *pos* and return the stacked array.

    Relies on the module-level reader ``r`` and paraview's ``servermanager``.
    With cached=True the previously saved result is loaded from 'lastX.npy'
    instead of re-reading the pvtu files.
    """
    if cached: #to save time
        Hist = (np.load('lastX.npy')).item()
        x = Hist['x']
        y = Hist['y']
        out = Hist['n']
        return out
    out = [] #empty list declaration
    for files in all_files: #passed to read data
        print base_dir+files
        r.FileName = base_dir+files
        r.UpdatePipeline()
        z = servermanager.Fetch(r)
        pdata = z.GetPointData()
        a = pdata.GetArray('alpha_0')
        data = vtk_to_array(a) #send to a standard python array
        # NOTE(review): this unpacking binds the dict's two keys and is
        # immediately overwritten below -- dead statement.
        x,y = pos #needs to have right dimension (xy)
        allx = pos['orig'][0]
        ally = pos['orig'][1]
        x = pos['new'][0]
        y = pos['new'][1]
        #python thing,
        # Cubic interpolation of scattered points onto the regular grid
        out.append(griddata((allx, ally),data, (x[None,:], y[:,None]), method='cubic'))
    out = np.array(out)
    # Cache the gridded result for the cached=True fast path
    Hist={'x':x,'y':y,'n':out}
    np.save('lastX',Hist)
    return out
def gamma_theory(ny, dky, mu = 1.0e-2 ,alpha = 3.0e-5,beta = 6.0e-4,
                 Ln = 130.0/4.0, n0 = 10.0):
    """Linear growth rates and frequencies of the 2x2 density/potential
    dispersion relation, evaluated on ``ny`` wavenumbers spaced by ``dky``.

    Returns a dict with per-k eigenvalue imaginary parts ('gamma'), real
    parts ('freq'), the largest growth rate ('gammamax') and the frequency
    of the fastest-growing mode ('freqmax').
    """
    # A tiny offset keeps the k = 0 entry finite (the matrix divides by k).
    wavenumbers = dky*np.arange(ny)+(1e-8*dky)
    soln = {'freq': [], 'gamma': [], 'gammamax': [], 'freqmax': []}
    for k in wavenumbers:
        dispersion = np.zeros([2,2],dtype=complex)
        # density row
        dispersion[0,0] = -1j*mu*(k**2)
        dispersion[0,1] = k*n0/Ln
        # potential row
        dispersion[1,0] = -beta/(n0*k)
        dispersion[1,1] = -1j*(alpha + mu*k**4)/(k**2)
        eigvals, _eigvecs = np.linalg.eig(dispersion)
        growth = eigvals.imag
        frequency = eigvals.real
        soln['gamma'].append(growth)
        soln['gammamax'].append(max(growth))
        fastest = (growth == growth.max())
        soln['freqmax'].append(frequency[fastest])
        soln['freq'].append(frequency)
    # return the analytic solution
    return soln
#this reader can deal with .vptu files
r = XMLPartitionedUnstructuredGridReader()
#generate some filenames
base_dir = "/work/01523/meyerson/linear_Arcon/density_output/"
field = "density"
all_pvtus = []
for files in os.listdir(base_dir):
    if files.endswith(".pvtu"):
        all_pvtus.append(files)
# Sort by the integer between '-' and '.' in each filename (frame index)
all_pvtus = sorted(all_pvtus, key = lambda x: int(re.split('-|\.',x)[1]))
# Recover the regular grid from the first frame, then load all frames
pos,dx,dy,Lx,Ly = get_pos(0)
#print all_pvtus[0:1]
data = read_data(base_dir,all_pvtus,pos,cached=True)
nt,nx,ny = data.shape
# Spectral amplitude |A_k| of every frame
fftn = np.fft.fft2(data)
Ak = np.sqrt(fftn.conj()*fftn)
# --- Summary figure: final density, spectrum, growth rates, amplitude ---
pp = PdfPages('summary.pdf')
fig = plt.figure()
frm_data = Frame(data[-1,:,:],meta={'mask':True,'dx':dx,'dy':dy,'title':'n',
                                    'stationary':True})
frm_data.render(fig,221)
dkx = 1.
dky = (2.*np.pi)/Ly
power = Frame(np.real(Ak)[-1,0:30,0:30],meta={'mask':True,'dx':dkx,'dy':dky,'title':'n',
                                              'stationary':True})
power.render(fig,222)
# Numerical growth rate from d/dt log|A_k| (frames are dt apart)
dt = 20
time = dt*np.arange(nt)
gamma_num = (np.gradient(np.log(np.real(Ak)))[0])/(np.gradient(time)[0])
gamma_ave = np.mean(gamma_num[-80:-20,:,:],axis=0)
# Analytic growth rate for comparison
analytic_soln = gamma_theory(ny,dky)
gamma_th = Frame(np.array(analytic_soln['gammamax'][1:ny/3]),
                 meta={'dx':dky,'x0':dky,'stationary':True,'yscale':'linear',
                       'title':r'$\gamma$','fontsz':20,
                       'ylabel':r'$\frac{\omega}{\omega_{ci}}$',
                       'xlabel':r'$k_y$','ticksize':14})
gamma_num = Frame(gamma_num[:,1:ny/3,0],meta={'dx':dky,'xlabel':r'$k_y$',
                                              'title':r'$\gamma$',
                                              'ylabel':r'$\frac{\omega}{\omega_{ci}}$',
                                              'x0':dky,'shareax':False,'style':'ro',
                                              'stationary':False,'ticksize':14,'fontsz':20})
gamma_num.t = nt -.7*nt
gamma_num.render(fig,223)
gamma_th.render(fig,223)
print data.shape
# Peak amplitude of |n| over the domain, per frame
namp = abs(data).max(1).max(1)
namp = Frame(namp,meta={'ticksize':14})
namp.render(fig,224)
fig.savefig(pp,format='pdf')
plt.close(fig)
pp.close()
# --- Standalone growth-rate figure (numeric vs analytic overlay) ---
pp = PdfPages('gamma.pdf')
fig = plt.figure()
#gamma_num.render(fig,111)
gamma_th.ax = None
#gamma.t = 20
gamma_num.ax = None
gamma_th.render(fig,111)
gamma_num.render(fig,111)
fig.savefig(pp,format='pdf')
plt.close(fig)
pp.close()
#let's make a movie
fig = plt.figure()
gamma_num.t = 0
gamma_num.ax = fig.add_subplot(111)
gamma_th.ax = gamma_num.ax
#gamma_th.ax = fig.add_subplot(111)
FrameMovie([gamma_num,gamma_th],fast=True,moviename='gamma',fps=6,fig=fig)
|
foci/ArcOn-r2
|
scripts/arcon_view2.py
|
Python
|
lgpl-3.0
| 7,013
|
[
"ParaView"
] |
97d08d0c8be904aac8b7f8a0eb205fdec1d28206de0556585ff61a1444390824
|
'''
Created on Jun 2, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import operator
import collections
import pysam
from seq import DNA_reverse_complement
#
# constants used for CIGAR alignments
#
CIGAR_M = 0 #match  Alignment match (can be a sequence match or mismatch)
CIGAR_I = 1 #insertion  Insertion to the reference
CIGAR_D = 2 #deletion  Deletion from the reference
CIGAR_N = 3 #skip  Skipped region from the reference
CIGAR_S = 4 #softclip  Soft clip on the read (clipped sequence present in <seq>)
CIGAR_H = 5 #hardclip  Hard clip on the read (clipped sequence NOT present in <seq>)
CIGAR_P = 6 #padding  Padding (silent deletion from the padded reference sequence)
CIGAR_E = 7 # sequence match
CIGAR_X = 8 # sequence mismatch
# Ops that consume reference positions / read (query) bases respectively,
# following the SAM specification's "consumes reference / consumes query" table.
REF_ADVANCING_CIGAR_CODES = frozenset((CIGAR_M, CIGAR_D, CIGAR_N, CIGAR_E, CIGAR_X))
SEQ_ADVANCING_CIGAR_CODES = frozenset((CIGAR_M, CIGAR_I, CIGAR_S, CIGAR_E, CIGAR_X))
class CIGAR:
    """Namespace of the numeric BAM CIGAR op codes.

    Duplicates the module-level CIGAR_* constants as class attributes for
    dotted access (CIGAR.M, CIGAR.S, ...).
    """
    M = 0 #match  Alignment match (can be a sequence match or mismatch)
    I = 1 #insertion  Insertion to the reference
    D = 2 #deletion  Deletion from the reference
    N = 3 #skip  Skipped region from the reference
    S = 4 #softclip  Soft clip on the read (clipped sequence present in <seq>)
    H = 5 #hardclip  Hard clip on the read (clipped sequence NOT present in <seq>)
    P = 6 #padding  Padding (silent deletion from the padded reference sequence)
    E = 7 #sequence match
    X = 8 #sequence mismatch
def parse_reads_by_qname(samfh):
    """
    Yield successive lists of reads that share the same qname.

    The input iterator must already be grouped by qname (e.g. a
    qname-sorted SAM/BAM file).
    """
    batch = []
    for aln in samfh:
        # a qname change closes the current batch
        if batch and aln.qname != batch[-1].qname:
            yield batch
            batch = []
        batch.append(aln)
    if batch:
        yield batch
def parse_pe_reads(bamfh):
    """
    Yield one ([read1 alignments], [read2 alignments]) tuple per fragment.

    The input must be sorted/grouped by qname; a read lands in slot 1 when
    its ``is_read2`` flag is set, otherwise in slot 0.
    """
    mates = ([], [])
    count = 0
    last_qname = None
    for aln in bamfh:
        name = aln.qname
        # a new qname means the previous fragment is complete
        if count and name != last_qname:
            yield mates
            mates = ([], [])
            count = 0
        slot = 1 if aln.is_read2 else 0
        mates[slot].append(aln)
        last_qname = name
        count += 1
    if count:
        yield mates
def group_read_pairs(pe_reads):
    """
    Split ([read1 alns], [read2 alns]) into mate pairs and unpaired reads.

    Returns (pairs, unpaired_reads): pairs is a list of (r1, r2) tuples
    matched by (mate reference, mate position); unpaired_reads mirrors the
    input tuple shape with everything not flagged as a proper pair.
    """
    proper = ([], [])
    unpaired = ([], [])
    for readnum, alignments in enumerate(pe_reads):
        for aln in alignments:
            target = proper if aln.is_proper_pair else unpaired
            target[readnum].append(aln)
    pairs = []
    # pairing is only attempted when both mates contributed at least one read
    if proper[0] and proper[1]:
        # index read1 alignments by where they claim their mate sits
        by_mate_pos = collections.defaultdict(collections.deque)
        for aln in proper[0]:
            by_mate_pos[(aln.rnext, aln.pnext)].append(aln)
        # match each read2 to the first read1 pointing at its location
        for r2 in proper[1]:
            r1 = by_mate_pos[(r2.tid, r2.pos)].popleft()
            pairs.append((r1, r2))
    return pairs, unpaired
def select_best_scoring_pairs(pairs):
    """
    Return the read pairs (list of (r1, r2) tuples) whose summed 'AS'
    alignment score is the highest, including ties.

    Bug fix: the previous version sorted ascending, so 'best_score' was the
    LOWEST sum and the early-exit condition could never trigger -- every
    pair was returned regardless of score. Sorting descending restores the
    documented "highest summed alignment score" behavior.
    """
    if len(pairs) == 0:
        return []
    # gather the summed alignment score for each pair, highest first
    pair_scores = [(pair[0].opt('AS') + pair[1].opt('AS'), pair) for pair in pairs]
    pair_scores.sort(key=operator.itemgetter(0), reverse=True)
    best_score = pair_scores[0][0]
    best_pairs = [pair_scores[0][1]]
    for score, pair in pair_scores[1:]:
        if score < best_score:
            break
        best_pairs.append(pair)
    return best_pairs
def select_primary_alignments(reads):
    """
    Return the mapped, non-secondary alignments; when none exist, fall back
    to the unmapped reads (there must be at least one).
    """
    if len(reads) == 0:
        return []
    unmapped = [aln for aln in reads if aln.is_unmapped]
    primary = [aln for aln in reads
               if not aln.is_unmapped and not aln.is_secondary]
    if not primary:
        # every mapped alignment was secondary; report the unmapped ones
        assert len(unmapped) > 0
        return unmapped
    return primary
def copy_read(r):
    """Return a field-by-field copy of AlignedRead ``r``.

    NOTE(review): the copy is made attribute-by-attribute; the assignment
    order is kept as-is because some pysam setters interact (assigning
    ``seq`` is understood to reset base qualities, so ``seq`` must be set
    before ``qual``) -- confirm against the pysam version in use.
    """
    a = pysam.AlignedRead()
    a.qname = r.qname
    a.seq = r.seq
    a.flag = r.flag
    a.tid = r.tid
    a.pos = r.pos
    a.mapq = r.mapq
    a.cigar = r.cigar
    a.rnext = r.rnext
    a.pnext = r.pnext
    a.isize = r.isize
    a.qual = r.qual
    # tags is copied into a fresh list so the clone does not alias r's tags
    a.tags = list(r.tags)
    return a
def soft_pad_read(fq, r):
    """
    Restore the full-length sequence from fastq record ``fq`` onto the
    (possibly trimmed) alignment ``r``, soft-clipping the re-added bases.

    'fq' is the fastq record; 'r' is the AlignedRead SAM read. For a
    reverse-strand alignment the sequence is reverse-complemented, the
    qualities reversed, and the soft clip prepended; otherwise the clip is
    appended.
    """
    ext_length = len(fq.seq) - len(r.seq)
    pad = [(CIGAR_S, ext_length)]
    cigar = r.cigar
    needs_pad = (cigar is not None) and (ext_length > 0)
    if r.is_reverse:
        seq = DNA_reverse_complement(fq.seq)
        qual = fq.qual[::-1]
        if needs_pad:
            cigar = pad + cigar
    else:
        seq = fq.seq
        qual = fq.qual
        if needs_pad:
            cigar = cigar + pad
    # replace read fields (seq before qual: pysam resets qual on seq assignment)
    r.seq = seq
    r.qual = qual
    r.cigar = cigar
def pair_reads(r1, r2, tags=None):
    '''
    Fill in the paired-end SAM fields on mates r1 and r2.

    Both reads are flagged as a proper pair, pointed at each other's
    reference/position, given the merged optional *tags*, and assigned
    a signed insert size (positive on the leftmost mate).
    '''
    if tags is None:
        tags = []
    def _link(read, mate, second_in_pair):
        # mark the read as one half of a proper pair
        read.is_paired = True
        read.is_proper_pair = True
        if second_in_pair:
            read.is_read2 = True
        else:
            read.is_read1 = True
        # mirror the mate's orientation and location
        read.mate_is_reverse = mate.is_reverse
        read.mate_is_unmapped = mate.is_unmapped
        read.rnext = mate.tid
        read.pnext = mate.pos
        # merge the shared extra tags into the read's own tags
        merged = collections.OrderedDict(read.tags)
        merged.update(tags)
        read.tags = merged.items()
    _link(r1, r2, False)
    _link(r2, r1, True)
    # compute insert size: zero across references, else signed outer span
    if r1.tid != r2.tid:
        r1.isize = 0
        r2.isize = 0
    elif r1.pos > r2.pos:
        isize = r1.aend - r2.pos
        r1.isize = -isize
        r2.isize = isize
    else:
        isize = r2.aend - r1.pos
        r1.isize = isize
        r2.isize = -isize
def get_clipped_interval(r):
    """Return (start, end) of ``r`` on the reference, widened by clipping.

    The aligned interval [pos, aend) is extended by the length of a leading
    soft/hard clip (moving the start left) or, failing that, a trailing clip
    (moving the end right).

    NOTE(review): because of the if/elif, a read clipped at *both* ends only
    has its start padded -- confirm that is intended.
    """
    cigar = r.cigar
    padstart, padend = r.pos, r.aend
    if len(cigar) > 1:
        if (cigar[0][0] == CIGAR_S or
            cigar[0][0] == CIGAR_H):
            padstart -= cigar[0][1]
        elif (cigar[-1][0] == CIGAR_S or
              cigar[-1][0] == CIGAR_H):
            padend += cigar[-1][1]
    return padstart, padend
def get_aligned_intervals(read):
    """
    Return the reference (start, end) intervals covered by ``read``,
    splitting at N (skip/intron) operations.

    M, D, E and X ops extend the current interval; an N op closes it (if
    non-empty) and starts a new one past the skipped region. Other ops
    (I, S, H, P) consume no reference bases and are ignored.
    """
    intervals = []
    start = read.pos
    end = start
    for op, length in read.cigar:
        if op in (CIGAR_M, CIGAR_D, CIGAR_E, CIGAR_X):
            end += length
        elif op == CIGAR_N:
            if end > start:
                intervals.append((start, end))
            start = end + length
            end = start
    if end > start:
        intervals.append((start, end))
    return intervals
|
madhavsuresh/chimerascan
|
chimerascan/lib/sam.py
|
Python
|
gpl-3.0
| 8,356
|
[
"pysam"
] |
e5f3310aaef91c65fe31ed21b7dad44f8d411325b63e00ba3ad3363bfdf84e6b
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Canned Chrome User-Agent strings keyed by device/form-factor type; used by
# the helpers below to override the browser's UA.
UA_TYPE_MAPPING = {
    'chromeos':
    'Mozilla/5.0 (X11; CrOS x86_64 9202.60.0) '
    'AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/57.0.2987.137 Safari/537.36',
    'desktop':
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) '
    'AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/60.0.3112.90 Safari/537.36',
    'mobile':
    'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) '
    'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Mobile '
    'Safari/535.36',
    'tablet':
    'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 7 Build/IMM76B) '
    'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/60.0.3112.90 '
    'Safari/535.36',
    'tablet_10_inch':
    'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 10 Build/IMM76B) '
    'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/60.0.3112.90 '
    'Safari/535.36',
}
def GetChromeUserAgentArgumentFromType(user_agent_type):
  """Returns chrome command-line args overriding the UA for a device type.

  This is derived from:
  https://developers.google.com/chrome/mobile/docs/user-agent

  An empty list is returned for a falsy type (no override).
  """
  if not user_agent_type:
    return []
  return ['--user-agent=%s' % UA_TYPE_MAPPING[user_agent_type]]
def GetChromeUserAgentDictFromType(user_agent_type):
  """Returns a {'userAgent': ...} override dict for the given device type,
  or an empty dict when no type is supplied."""
  if not user_agent_type:
    return {}
  return {'userAgent': UA_TYPE_MAPPING[user_agent_type]}
|
endlessm/chromium-browser
|
third_party/catapult/telemetry/telemetry/internal/browser/user_agent.py
|
Python
|
bsd-3-clause
| 1,542
|
[
"Galaxy"
] |
4cebef3ffe67072bf6a26ad89617f9d3ff2d6586e598f4831f70c569f7f29079
|
from datetime import datetime as dt
import os
from enum import IntEnum
import logging
from typing import Optional
from netCDF4 import Dataset, num2date
from hyo2.soundspeed.base.geodesy import Geodesy
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.profile import Profile
from hyo2.soundspeed.profile.profilelist import ProfileList
from hyo2.abc.lib.progress.cli_progress import CliProgress
logger = logging.getLogger(__name__)
class RegOfsOffline:
    """Offline reader for NOAA Regional Operational Forecast System (OFS)
    netCDF output: extracts a synthetic sound-speed profile at the model
    node closest to a queried (lat, lon)."""

    class Model(IntEnum):
        # Values group the models by coastal region (1x East Coast,
        # 2x Gulf of Mexico, 3x Great Lakes, 4x Pacific Coast).
        # East Coast
        CBOFS = 10  # RG = True     # Format is GoMOFS
        DBOFS = 11  # RG = True     # Format is GoMOFS
        GoMOFS = 12  # RG = True    # Format is GoMOFS
        NYOFS = 13  # RG = False
        SJROFS = 14  # RG = False

        # Gulf of Mexico
        NGOFS = 20  # RG = True     # Format is GoMOFS
        TBOFS = 21  # RG = True     # Format is GoMOFS

        # Great Lakes
        LEOFS = 30  # RG = True     # Format is GoMOFS
        LHOFS = 31  # RG = False
        LMOFS = 32  # RG = False
        LOOFS = 33  # RG = False
        LSOFS = 34  # RG = False

        # Pacific Coast
        CREOFS = 40  # RG = True    # Format is GoMOFS
        SFBOFS = 41  # RG = True    # Format is GoMOFS

    # noinspection DuplicatedCode
    # Human-readable description for each supported model.
    regofs_model_descs = \
        {
            Model.CBOFS: "Chesapeake Bay Operational Forecast System",
            Model.DBOFS: "Delaware Bay Operational Forecast System",
            Model.GoMOFS: "Gulf of Maine Operational Forecast System",
            Model.NYOFS: "Port of New York and New Jersey Operational Forecast System",
            Model.SJROFS: "St. John's River Operational Forecast System",
            Model.NGOFS: "Northern Gulf of Mexico Operational Forecast System",
            Model.TBOFS: "Tampa Bay Operational Forecast System",
            Model.LEOFS: "Lake Erie Operational Forecast System",
            Model.LHOFS: "Lake Huron Operational Forecast System",
            Model.LMOFS: "Lake Michigan Operational Forecast System",
            Model.LOOFS: "Lake Ontario Operational Forecast System",
            Model.LSOFS: "Lake Superior Operational Forecast System",
            Model.CREOFS: "Columbia River Estuary Operational Forecast System",
            Model.SFBOFS: "San Francisco Bay Operational Forecast System"
        }

    def __init__(self, data_folder: str, prj: 'hyo2.soundspeed.soundspeed.SoundSpeedLibrary') -> None:
        """Store the data folder and owning library, and reset all caches."""
        self.name = self.__class__.__name__
        self.desc = "Abstract atlas"  # a human-readable description
        self.data_folder = data_folder
        self.prj = prj
        self.g = Geodesy()

        self._has_data_loaded = False  # grids are "loaded" ? (netCDF files are opened)
        self._file = None
        self._day_idx = 0
        self._timestamp = None
        self._zeta = None      # sea surface elevation per node
        self._siglay = None    # sigma-layer fractions
        self._h = None         # bathymetric depth per node
        self._lats = None
        self._lons = None
        self._lat = None       # latitude of the selected node
        self._lon = None       # longitude of the selected node
        self._loc_idx = None   # index of the selected node
        self._d = None         # derived depth levels at the selected node
        self._temp = None
        self._sal = None

    def query(self, nc_path: str, lat: float, lon: float) -> Optional[ProfileList]:
        """Build a synthetic sound-speed profile from the model node closest
        to (lat, lon) in the netCDF file at *nc_path*.

        Returns a ProfileList with a single profile, or None on any failure
        (missing variables, unreadable file, or no node within range).
        """
        if not os.path.exists(nc_path):
            raise RuntimeError('Unable to locate %s' % nc_path)
        logger.debug('nc path: %s' % nc_path)

        if (lat is None) or (lon is None):
            logger.error("invalid location query: (%s, %s)" % (lon, lat))
            return None
        logger.debug('query location: %s, %s' % (lat, lon))

        progress = CliProgress()

        try:
            self._file = Dataset(nc_path)
            progress.update(20)

        except (RuntimeError, IOError) as e:
            logger.warning("unable to access data: %s" % e)
            self.clear_data()
            progress.end()
            return None

        # NOTE(review): _has_data_loaded is never set to True here, so
        # clear_data() will not close the Dataset opened above on the normal
        # path -- possible file-handle leak; confirm intended lifecycle.
        try:
            self.name = self._file.title

            time = self._file.variables['time']
            self._timestamp = num2date(time[0], units=time.units)
            logger.debug("Retrieved time: %s" % self._timestamp.isoformat())

            # Now get latitudes, longitudes and depths for x,y,z referencing
            self._lats = self._file.variables['lat'][:]
            self._lons = self._file.variables['lon'][:]
            # logger.debug('lat:(%s)\n%s' % (self._lats.shape, self._lats))
            # logger.debug('lon:(%s)\n%s' % (self._lons.shape, self._lons))

            self._zeta = self._file.variables['zeta'][0, :]
            self._siglay = self._file.variables['siglay'][:]
            self._h = self._file.variables['h'][:]
            # logger.debug('zeta:(%s)\n%s' % (self._zeta.shape, self._zeta))
            # logger.debug('siglay:(%s)\n%s' % (self._siglay.shape, self._siglay[:, 0]))
            # logger.debug('h:(%s)\n%s' % (self._h.shape, self._h))

            self._temp = self._file.variables['temp'][:]
            self._sal = self._file.variables['salinity'][:]
            # logger.debug('temp:(%s)\n%s' % (self._temp.shape, self._temp[:, 0]))
            # logger.debug('sal:(%s)\n%s' % (self._sal.shape, self._sal[:, 0]))

        except Exception as e:
            logger.error("troubles in variable lookup for lat/long grid and/or depth: %s" % e)
            self.clear_data()
            progress.end()
            return None

        # Linear scan for the node nearest to the queried location.
        # NOTE(review): the sentinel is 100000.0 while nodes are rejected
        # beyond 10000.0; distances are presumably metres (Geodesy.distance)
        # -- confirm the units and the intended 10 km cut-off.
        min_dist = 100000.0
        min_idx = None
        for idx, _ in enumerate(self._lats):
            nc_lat = self._lats[idx]
            nc_lon = self._lons[idx]
            if nc_lon > 180.0:  # normalize [0, 360) longitudes
                nc_lon = nc_lon - 360.0
            nc_dist = self.g.distance(nc_lon, nc_lat, lon, lat)
            # logger.debug('loc: %.6f, %.6f -> %.6f' % (nc_lat, nc_lon, nc_dist))
            if nc_dist < min_dist:
                min_dist = nc_dist
                min_idx = idx
        if min_dist >= 10000.0:
            logger.error("location too far from model nodes: %.f" % min_dist)
            self.clear_data()
            progress.end()
            return None
        self._loc_idx = min_idx
        self._lon = self._lons[self._loc_idx]
        if self._lon > 180.0:
            self._lon = self._lon - 360.0
        self._lat = self._lats[self._loc_idx]
        logger.debug('closest node: %d [%s, %s] -> %s' % (self._loc_idx, self._lat, self._lon, min_dist))

        # Convert sigma layers to depths at the selected node:
        # depth = -siglay * (bathymetry + surface elevation)
        zeta = self._zeta[self._loc_idx]
        h = self._h[self._loc_idx]
        siglay = -self._siglay[:, self._loc_idx]
        # logger.debug('zeta: %s, h: %s, siglay: %s' % (zeta, h, siglay))
        self._d = siglay * (h + zeta)
        # logger.debug('d:(%s)\n%s' % (self._h.shape, self._d))

        # Make a new SV object to return our query in
        ssp = Profile()
        ssp.meta.sensor_type = Dicts.sensor_types['Synthetic']
        ssp.meta.probe_type = Dicts.probe_types[self.name]
        ssp.meta.latitude = self._lat
        ssp.meta.longitude = self._lon
        ssp.meta.utc_time = dt(year=self._timestamp.year, month=self._timestamp.month,
                               day=self._timestamp.day, hour=self._timestamp.hour,
                               minute=self._timestamp.minute, second=self._timestamp.second)
        ssp.meta.original_path = "%s_%s" % (self.name, self._timestamp.strftime("%Y%m%d_%H%M%S"))
        ssp.init_data(self._d.shape[0])
        ssp.data.depth = self._d[:]
        ssp.data.temp = self._temp[0, :, self._loc_idx]
        ssp.data.sal = self._sal[0, :, self._loc_idx]
        ssp.calc_data_speed()
        ssp.clone_data_to_proc()
        ssp.init_sis()

        profiles = ProfileList()
        profiles.append_profile(ssp)

        progress.end()
        return profiles

    def clear_data(self) -> None:
        """Delete the data and reset the last loaded day"""
        logger.debug("clearing data")
        if self._has_data_loaded:
            if self._file:
                self._file.close()
        self._has_data_loaded = False  # grids are "loaded" ? (netCDF files are opened)
        self._file = None
        self._day_idx = 0
        self._timestamp = None
        self._zeta = None
        self._siglay = None
        self._h = None
        self._lats = None
        self._lons = None
        self._lat = None
        self._lon = None
        self._loc_idx = None
        self._d = None
        self._temp = None
        self._sal = None

    def __repr__(self):
        """Extend the default repr with load state and loaded day.

        NOTE(review): this raises AttributeError when _timestamp is None
        (strftime on None) -- confirm __repr__ is only used after a query.
        """
        msg = "%s" % super().__repr__()
        msg += "      <has data loaded: %s>\n" % (self._has_data_loaded,)
        msg += "      <loaded day: %s>\n" % (self._timestamp.strftime(r"%d\%m\%Y"),)
        return msg
|
hydroffice/hyo_soundspeed
|
hyo2/soundspeed/atlas/regofsoffline.py
|
Python
|
lgpl-2.1
| 8,583
|
[
"NetCDF"
] |
8b68bea013e2239fcc9cb1fd4a6bcef631b8971df8aeb46370f03a87b487ed04
|
import sys
sys.path.insert(1, "../../../")
import h2o

def offsets_and_distributions(ip, port):
    """Smoke-test h2o.deeplearning's offset_column across several distributions."""
    # cars frame: drop rows with a missing response, make it categorical,
    # and cbind a constant 0.5 offset column named "x1".
    cars = h2o.upload_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
    cars = cars[cars["economy_20mpg"].isna() == 0]
    cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    offset = h2o.H2OFrame(python_obj=[[.5] for x in range(398)])
    offset.setNames(["x1"])
    cars = cars.cbind(offset)
    # insurance frame: log(Holders) is the classic Poisson/GLM offset.
    insurance = h2o.import_file(h2o.locate("smalldata/glm_test/insurance.csv"))
    insurance["offset"] = insurance["Holders"].log()
    # bernoulli - offset not supported
    #dl = h2o.deeplearning(x=cars[2:8], y=cars["economy_20mpg"], distribution="bernoulli", offset_column="x1",
    #                      training_frame=cars)
    #predictions = dl.predict(cars)
    # gamma / gaussian / poisson share the same call shape — run them in order.
    for family in ("gamma", "gaussian", "poisson"):
        dl = h2o.deeplearning(x=insurance[0:3], y=insurance["Claims"], distribution=family, offset_column="offset", training_frame=insurance)
        predictions = dl.predict(insurance)
    # tweedie (column-name style for x/y)
    dl = h2o.deeplearning(x=insurance.names[0:3], y="Claims", distribution="tweedie", offset_column="offset", training_frame=insurance)
    predictions = dl.predict(insurance)

if __name__ == "__main__":
    h2o.run_test(sys.argv, offsets_and_distributions)
|
mrgloom/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_offsets_and_distributionsDeeplearning.py
|
Python
|
apache-2.0
| 1,660
|
[
"Gaussian"
] |
a8a6e14492144d7eeec03e877bcbed894ebc069634d8221fb8439ba887eb07e3
|
import galaxy.model
from galaxy.model.orm import *
from galaxy.model.mapping import context as sa_session
from base.twilltestcase import *
not_logged_in_security_msg = 'You must be logged in as an administrator to access this feature.'
logged_in_security_msg = 'You must be an administrator to access this feature.'
import sys
class TestSecurityAndLibraries( TwillTestCase ):
    def test_000_admin_features_when_not_logged_in( self ):
        """Testing admin_features when not logged in"""
        # Every admin endpoint must refuse anonymous access with the same message.
        self.logout()
        self.visit_url( "%s/admin" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/reload_tool?tool_id=upload1" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/roles" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/create_role" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        # NOTE(review): the create_role visit/check below duplicates the one above.
        self.visit_url( "%s/admin/create_role" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/role" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/groups" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/create_group" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        # NOTE(review): duplicate check with no intervening visit_url — re-checks
        # the create_group page; possibly a leftover from a removed visit.
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/admin/users" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/library_admin/library" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
        self.visit_url( "%s/library_admin/folder?obj_id=1&new=True" % self.url )
        self.check_page_for_string( not_logged_in_security_msg )
    def test_005_login_as_admin_user( self ):
        """Testing logging in as an admin user test@bx.psu.edu - tests initial settings for DefaultUserPermissions and DefaultHistoryPermissions"""
        self.login( email='test@bx.psu.edu' ) # test@bx.psu.edu is configured as our admin user
        self.visit_page( "admin" )
        self.check_page_for_string( 'Administration' )
        # Cache the admin user object in a module global for the later test methods,
        # which run in sequence and share this state.
        global admin_user
        admin_user = sa_session.query( galaxy.model.User ) \
                               .filter( galaxy.model.User.table.c.email=='test@bx.psu.edu' ) \
                               .first()
        assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
        # Get the admin user's private role for later use
        global admin_user_private_role
        admin_user_private_role = None
        for role in admin_user.all_roles():
            # A private role is identified by name == email plus the standard description.
            if role.name == admin_user.email and role.description == 'Private Role for %s' % admin_user.email:
                admin_user_private_role = role
                break
        if not admin_user_private_role:
            raise AssertionError( "Private role not found for user '%s'" % admin_user.email )
        # Make sure DefaultUserPermissions are correct
        if len( admin_user.default_permissions ) > 1:
            raise AssertionError( '%d DefaultUserPermissions associated with user %s ( should be 1 )' \
                                  % ( len( admin_user.default_permissions ), admin_user.email ) )
        dup = sa_session.query( galaxy.model.DefaultUserPermissions ) \
                        .filter( galaxy.model.DefaultUserPermissions.table.c.user_id==admin_user.id ) \
                        .first()
        if not dup.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DefaultUserPermission.action for user "%s" is "%s", but it should be "%s"' \
                                  % ( admin_user.email, dup.action, galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
        # Make sure DefaultHistoryPermissions are correct
        # Logged in as admin_user
        latest_history = sa_session.query( galaxy.model.History ) \
                                   .filter( and_( galaxy.model.History.table.c.deleted==False,
                                                  galaxy.model.History.table.c.user_id==admin_user.id ) ) \
                                   .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
                                   .first()
        if len( latest_history.default_permissions ) > 1:
            raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d when it was created ( should have been 1 )' \
                                  % ( len( latest_history.default_permissions ), latest_history.id ) )
        dhp = sa_session.query( galaxy.model.DefaultHistoryPermissions ) \
                        .filter( galaxy.model.DefaultHistoryPermissions.table.c.history_id==latest_history.id ) \
                        .first()
        if not dhp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DefaultHistoryPermission.action for history id %d is "%s", but it should be "%s"' \
                                  % ( latest_history.id, dhp.action, galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
        self.home()
        self.visit_url( "%s/admin/user?id=%s" % ( self.url, self.security.encode_id( admin_user.id ) ) )
        self.check_page_for_string( admin_user.email )
        # Try deleting the admin_user's private role
        check_str = "You cannot eliminate a user's private role association."
        self.associate_roles_and_groups_with_user( self.security.encode_id( admin_user.id ), admin_user.email,
                                                   out_role_ids=str( admin_user_private_role.id ),
                                                   check_str=check_str )
        self.logout()
    def test_010_login_as_regular_user1( self ):
        """Testing logging in as regular user test1@bx.psu.edu - tests private role creation and changing DefaultHistoryPermissions for new histories"""
        # Some of the history related tests here are similar to some tests in the
        # test_history_functions.py script, so we could potentially eliminate 1 or 2 of them.
        self.login( email='test1@bx.psu.edu' ) # test1@bx.psu.edu is not an admin user
        global regular_user1
        regular_user1 = sa_session.query( galaxy.model.User ) \
                                  .filter( galaxy.model.User.table.c.email=='test1@bx.psu.edu' ) \
                                  .first()
        assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
        # Non-admin users must be blocked from the admin pages.
        self.visit_page( "admin" )
        self.check_page_for_string( logged_in_security_msg )
        # Make sure a private role exists for regular_user1
        private_role = None
        for role in regular_user1.all_roles():
            if role.name == regular_user1.email and role.description == 'Private Role for %s' % regular_user1.email:
                private_role = role
                break
        if not private_role:
            raise AssertionError( "Private role not found for user '%s'" % regular_user1.email )
        global regular_user1_private_role
        regular_user1_private_role = private_role
        # Add a dataset to the history
        self.upload_file( '1.bed' )
        latest_dataset = sa_session.query( galaxy.model.Dataset ) \
                                   .order_by( desc( galaxy.model.Dataset.table.c.create_time ) ) \
                                   .first()
        # Make sure DatasetPermissions is correct - default is 'manage permissions'
        if len( latest_dataset.actions ) > 1:
            raise AssertionError( '%d DatasetPermissions were created for dataset id %d when it was created ( should have been 1 )' \
                                  % ( len( latest_dataset.actions ), latest_dataset.id ) )
        dp = sa_session.query( galaxy.model.DatasetPermissions ) \
                       .filter( galaxy.model.DatasetPermissions.table.c.dataset_id==latest_dataset.id ) \
                       .first()
        if not dp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DatasetPermissions.action for dataset id %d is "%s", but it should be "manage permissions"' \
                                  % ( latest_dataset.id, dp.action ) )
        # Change DefaultHistoryPermissions for regular_user1: grant ALL permitted
        # actions via the user's private role.
        permissions_in = []
        actions_in = []
        for key, value in galaxy.model.Dataset.permitted_actions.items():
            # NOTE: setting the 'access' permission with the private role makes this dataset private
            permissions_in.append( key )
            actions_in.append( value.action )
        # Sort actions for later comparison
        actions_in.sort()
        role_id = str( private_role.id )
        self.user_set_default_permissions( permissions_in=permissions_in, role_id=role_id )
        # Make sure the default permissions are changed for new histories
        self.new_history()
        # logged in as regular_user1
        latest_history = sa_session.query( galaxy.model.History ) \
                                   .filter( and_( galaxy.model.History.table.c.deleted==False,
                                                  galaxy.model.History.table.c.user_id==regular_user1.id ) ) \
                                   .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
                                   .first()
        if len( latest_history.default_permissions ) != len( galaxy.model.Dataset.permitted_actions.items() ):
            raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d, should have been %d' % \
                                  ( len( latest_history.default_permissions ), latest_history.id, len( galaxy.model.Dataset.permitted_actions.items() ) ) )
        dhps = []
        for dhp in latest_history.default_permissions:
            dhps.append( dhp.action )
        # Sort permissions for later comparison
        dhps.sort()
        for key, value in galaxy.model.Dataset.permitted_actions.items():
            if value.action not in dhps:
                raise AssertionError( '%s not in history id %d default_permissions after they were changed' % ( value.action, latest_history.id ) )
        # Add a dataset to the history
        self.upload_file( '1.bed' )
        latest_dataset = sa_session.query( galaxy.model.Dataset ).order_by( desc( galaxy.model.Dataset.table.c.create_time ) ).first()
        # Make sure DatasetPermissionss are correct
        if len( latest_dataset.actions ) != len( latest_history.default_permissions ):
            raise AssertionError( '%d DatasetPermissionss were created for dataset id %d when it was created ( should have been %d )' % \
                                  ( len( latest_dataset.actions ), latest_dataset.id, len( latest_history.default_permissions ) ) )
        dps = []
        for dp in latest_dataset.actions:
            dps.append( dp.action )
        # Sort actions for later comparison
        dps.sort()
        # Compare DatasetPermissions with permissions_in - should be the same
        if dps != actions_in:
            raise AssertionError( 'DatasetPermissionss "%s" for dataset id %d differ from changed default permissions "%s"' \
                                  % ( str( dps ), latest_dataset.id, str( actions_in ) ) )
        # Compare DefaultHistoryPermissions and DatasetPermissionss - should be the same
        if dps != dhps:
            raise AssertionError( 'DatasetPermissionss "%s" for dataset id %d differ from DefaultHistoryPermissions "%s" for history id %d' \
                                  % ( str( dps ), latest_dataset.id, str( dhps ), latest_history.id ) )
        self.logout()
def test_015_login_as_regular_user2( self ):
"""Testing logging in as regular user test2@bx.psu.edu - tests changing DefaultHistoryPermissions for the current history"""
email = 'test2@bx.psu.edu'
self.login( email=email ) # This will not be an admin user
global regular_user2
regular_user2 = sa_session.query( galaxy.model.User ) \
.filter( galaxy.model.User.table.c.email==email ) \
.first()
assert regular_user2 is not None, 'Problem retrieving user with email "" from the database' % email
# Logged in as regular_user2
latest_history = sa_session.query( galaxy.model.History ) \
.filter( and_( galaxy.model.History.table.c.deleted==False,
galaxy.model.History.table.c.user_id==regular_user2.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
self.upload_file( '1.bed' )
latest_dataset = sa_session.query( galaxy.model.Dataset ).order_by( desc( galaxy.model.Dataset.table.c.create_time ) ).first()
permissions_in = [ 'DATASET_MANAGE_PERMISSIONS' ]
# Make sure these are in sorted order for later comparison
actions_in = [ 'manage permissions' ]
permissions_out = [ 'DATASET_ACCESS' ]
actions_out = [ 'access' ]
global regular_user2_private_role
regular_user2_private_role = None
for role in regular_user2.all_roles():
if role.name == regular_user2.email and role.description == 'Private Role for %s' % regular_user2.email:
regular_user2_private_role = role
break
if not regular_user2_private_role:
raise AssertionError( "Private role not found for user '%s'" % regular_user2.email )
role_id = str( regular_user2_private_role.id )
# Change DefaultHistoryPermissions for the current history
self.history_set_default_permissions( permissions_out=permissions_out, permissions_in=permissions_in, role_id=role_id )
if len( latest_history.default_permissions ) != len( actions_in ):
raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d, should have been %d' \
% ( len( latest_history.default_permissions ), latest_history.id, len( permissions_in ) ) )
# Make sure DefaultHistoryPermissions were correctly changed for the current history
dhps = []
for dhp in latest_history.default_permissions:
dhps.append( dhp.action )
# Sort permissions for later comparison
dhps.sort()
# Compare DefaultHistoryPermissions and actions_in - should be the same
if dhps != actions_in:
raise AssertionError( 'DefaultHistoryPermissions "%s" for history id %d differ from actions "%s" passed for changing' \
% ( str( dhps ), latest_history.id, str( actions_in ) ) )
# Make sure DatasetPermissionss are correct
if len( latest_dataset.actions ) != len( latest_history.default_permissions ):
raise AssertionError( '%d DatasetPermissionss were created for dataset id %d when it was created ( should have been %d )' \
% ( len( latest_dataset.actions ), latest_dataset.id, len( latest_history.default_permissions ) ) )
dps = []
for dp in latest_dataset.actions:
dps.append( dp.action )
# Sort actions for comparison
dps.sort()
# Compare DatasetPermissionss and DefaultHistoryPermissions - should be the same
if dps != dhps:
raise AssertionError( 'DatasetPermissionss "%s" for dataset id %d differ from DefaultHistoryPermissions "%s"' \
% ( str( dps ), latest_dataset.id, str( dhps ) ) )
self.logout()
def test_020_create_new_user_account_as_admin( self ):
"""Testing creating a new user account as admin"""
self.login( email=admin_user.email )
email = 'test3@bx.psu.edu'
password = 'testuser'
previously_created = self.create_new_account_as_admin( email=email, password=password )
# Get the user object for later tests
global regular_user3
regular_user3 = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email==email ).first()
assert regular_user3 is not None, 'Problem retrieving user with email "%s" from the database' % email
# Make sure DefaultUserPermissions were created
if not regular_user3.default_permissions:
raise AssertionError( 'No DefaultUserPermissions were created for user %s when the admin created the account' % email )
# Make sure a private role was created for the user
if not regular_user3.roles:
raise AssertionError( 'No UserRoleAssociations were created for user %s when the admin created the account' % email )
if not previously_created and len( regular_user3.roles ) != 1:
raise AssertionError( '%d UserRoleAssociations were created for user %s when the admin created the account ( should have been 1 )' \
% ( len( regular_user3.roles ), regular_user3.email ) )
for ura in regular_user3.roles:
role = sa_session.query( galaxy.model.Role ).get( ura.role_id )
if not previously_created and role.type != 'private':
raise AssertionError( 'Role created for user %s when the admin created the account is not private, type is' \
% str( role.type ) )
if not previously_created:
# Make sure a history was not created ( previous test runs may have left deleted histories )
histories = sa_session.query( galaxy.model.History ) \
.filter( and_( galaxy.model.History.table.c.user_id==regular_user3.id,
galaxy.model.History.table.c.deleted==False ) ) \
.all()
if histories:
raise AssertionError( 'Histories were incorrectly created for user %s when the admin created the account' % email )
# Make sure the user was not associated with any groups
if regular_user3.groups:
raise AssertionError( 'Groups were incorrectly associated with user %s when the admin created the account' % email )
def test_025_reset_password_as_admin( self ):
"""Testing reseting a user password as admin"""
email = 'test3@bx.psu.edu'
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testreset' )
self.logout()
def test_030_login_after_password_reset( self ):
"""Testing logging in after an admin reset a password - tests DefaultHistoryPermissions for accounts created by an admin"""
self.login( email='test3@bx.psu.edu', password='testreset' )
# Make sure a History and HistoryDefaultPermissions exist for the user
# Logged in as regular_user3
latest_history = sa_session.query( galaxy.model.History ) \
.filter( and_( galaxy.model.History.table.c.deleted==False,
galaxy.model.History.table.c.user_id==regular_user3.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
if not latest_history.user_id == regular_user3.id:
raise AssertionError( 'A history was not created for user %s when he logged in' % email )
if not latest_history.default_permissions:
raise AssertionError( 'No DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
if len( latest_history.default_permissions ) > 1:
raise AssertionError( 'More than 1 DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhp = sa_session.query( galaxy.model.DefaultHistoryPermissions ) \
.filter( galaxy.model.DefaultHistoryPermissions.table.c.history_id==latest_history.id ) \
.first()
if not dhp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DefaultHistoryPermission.action for history id %d is "%s", but it should be "manage permissions"' \
% ( latest_history.id, dhp.action ) )
# Upload a file to create a HistoryDatasetAssociation
self.upload_file( '1.bed' )
latest_dataset = sa_session.query( galaxy.model.Dataset ).order_by( desc( galaxy.model.Dataset.table.c.create_time ) ).first()
for dp in latest_dataset.actions:
# Should only have 1 DatasetPermissions
if dp.action != galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DatasetPermissions for dataset id %d is %s ( should have been %s )' \
% ( latest_dataset.id,
latest_dataset.actions.action,
galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
self.logout()
# Reset the password to the default for later tests
self.login( email='test@bx.psu.edu' )
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testuser' )
    def test_035_mark_user_deleted( self ):
        """Testing marking a user account as deleted"""
        self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
        # Deleting a user should not delete any associations
        sa_session.refresh( regular_user3 )
        # active_histories must survive the soft-delete of the account.
        if not regular_user3.active_histories:
            raise AssertionError( 'HistoryDatasetAssociations for regular_user3 were incorrectly deleted when the user was marked deleted' )
    def test_040_undelete_user( self ):
        """Testing undeleting a user account"""
        # Restores the account soft-deleted in test_035 so later tests can use it.
        self.undelete_user( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
    def test_045_create_role( self ):
        """Testing creating new role with 3 members ( and a new group named the same ), then renaming the role"""
        name = 'Role One'
        description = "This is Role Ones description"
        user_ids=[ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
        # create_group_for_role='yes' also creates a group with the same name.
        self.create_role( name=name,
                          description=description,
                          in_user_ids=user_ids,
                          in_group_ids=[],
                          create_group_for_role='yes',
                          private_role=admin_user.email )
        # Get the role object for later tests
        global role_one
        role_one = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()
        assert role_one is not None, 'Problem retrieving role named "Role One" from the database'
        # Make sure UserRoleAssociations are correct
        if len( role_one.users ) != len( user_ids ):
            raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created ( should have been %d )' \
                                  % ( len( role_one.users ), role_one.id, len( user_ids ) ) )
        # Each of the following users should now have 2 role associations, their private role and role_one
        for user in [ admin_user, regular_user1, regular_user3 ]:
            sa_session.refresh( user )
            if len( user.roles ) != 2:
                raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 2 )' \
                                      % ( len( user.roles ), user.email ) )
        # Make sure the group was created
        self.home()
        self.visit_page( 'admin/groups' )
        self.check_page_for_string( name )
        global group_zero
        group_zero = sa_session.query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name==name ).first()
        # Rename the role
        rename = "Role One's been Renamed"
        redescription="This is Role One's Re-described"
        self.rename_role( self.security.encode_id( role_one.id ), name=rename, description=redescription )
        self.home()
        self.visit_page( 'admin/roles' )
        self.check_page_for_string( rename )
        self.check_page_for_string( redescription )
        # Reset the role back to the original name and description
        self.rename_role( self.security.encode_id( role_one.id ), name=name, description=description )
    def test_050_create_group( self ):
        """Testing creating new group with 3 members and 1 associated role, then renaming it"""
        name = "Group One's Name"
        user_ids=[ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
        role_ids=[ str( role_one.id ) ]
        self.create_group( name=name, in_user_ids=user_ids, in_role_ids=role_ids )
        # Get the group object for later tests
        global group_one
        group_one = sa_session.query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name==name ).first()
        assert group_one is not None, 'Problem retrieving group named "Group One" from the database'
        # Make sure UserGroupAssociations are correct
        if len( group_one.users ) != len( user_ids ):
            raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been %d )' \
                                  % ( len( group_one.users ), group_one.id, len( user_ids ) ) )
        # Each user should now have 1 group association, group_one
        for user in [ admin_user, regular_user1, regular_user3 ]:
            sa_session.refresh( user )
            if len( user.groups ) != 1:
                raise AssertionError( '%d UserGroupAssociations are associated with user %s ( should be 1 )' % ( len( user.groups ), user.email ) )
        # Make sure GroupRoleAssociations are correct
        if len( group_one.roles ) != len( role_ids ):
            raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been %d )' \
                                  % ( len( group_one.roles ), group_one.id, len( role_ids ) ) )
        # Rename the group
        rename = "Group One's been Renamed"
        self.rename_group( self.security.encode_id( group_one.id ), name=rename, )
        self.home()
        self.visit_page( 'admin/groups' )
        self.check_page_for_string( rename )
        # Reset the group back to the original name
        self.rename_group( self.security.encode_id( group_one.id ), name=name )
    def test_055_add_members_and_role_to_group( self ):
        """Testing editing user membership and role associations of an existing group"""
        name = 'Group Two'
        # Create the group with no members or roles, then associate them below.
        self.create_group( name=name, in_user_ids=[], in_role_ids=[] )
        # Get the group object for later tests
        global group_two
        group_two = sa_session.query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name==name ).first()
        assert group_two is not None, 'Problem retrieving group named "Group Two" from the database'
        # group_two should have no associations
        if group_two.users:
            raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been 0 )' \
                                  % ( len( group_two.users ), group_two.id ) )
        if group_two.roles:
            raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been 0 )' \
                                  % ( len( group_two.roles ), group_two.id ) )
        user_ids = [ str( regular_user1.id ) ]
        role_ids = [ str( role_one.id ) ]
        self.associate_users_and_roles_with_group( self.security.encode_id( group_two.id ),
                                                   group_two.name,
                                                   user_ids=user_ids,
                                                   role_ids=role_ids )
    def test_060_create_role_with_user_and_group_associations( self ):
        """Testing creating a role with user and group associations"""
        # NOTE: To get this to work with twill, all select lists on the ~/admin/role page must contain at least
        # 1 option value or twill throws an exception, which is: ParseError: OPTION outside of SELECT
        # Due to this bug in twill, we create the role, we bypass the page and visit the URL in the
        # associate_users_and_groups_with_role() method.
        name = 'Role Two'
        description = 'This is Role Two'
        user_ids=[ str( admin_user.id ) ]
        group_ids=[ str( group_two.id ) ]
        private_role=admin_user.email
        # Create the role
        self.create_role( name=name,
                          description=description,
                          in_user_ids=user_ids,
                          in_group_ids=group_ids,
                          private_role=private_role )
        # Get the role object for later tests
        global role_two
        role_two = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()
        assert role_two is not None, 'Problem retrieving role named "Role Two" from the database'
        # Make sure UserRoleAssociations are correct
        if len( role_two.users ) != len( user_ids ):
            raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created with %d members' \
                                  % ( len( role_two.users ), role_two.id, len( user_ids ) ) )
        # admin_user should now have 3 role associations, private role, role_one, role_two
        sa_session.refresh( admin_user )
        if len( admin_user.roles ) != 3:
            raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 3 )' % ( len( admin_user.roles ), admin_user.email ) )
        # Make sure GroupRoleAssociations are correct
        sa_session.refresh( role_two )
        if len( role_two.groups ) != len( group_ids ):
            raise AssertionError( '%d GroupRoleAssociations were created for role id %d when it was created ( should have been %d )' \
                                  % ( len( role_two.groups ), role_two.id, len( group_ids ) ) )
        # group_two should now be associated with 2 roles: role_one, role_two
        sa_session.refresh( group_two )
        if len( group_two.roles ) != 2:
            raise AssertionError( '%d GroupRoleAssociations are associated with group id %d ( should be 2 )' % ( len( group_two.roles ), group_two.id ) )
    def test_065_change_user_role_associations( self ):
        """Testing changing roles associated with a user"""
        # Create a new role with no associations
        name = 'Role Three'
        description = 'This is Role Three'
        user_ids=[]
        group_ids=[]
        private_role=admin_user.email
        self.create_role( name=name,
                          description=description,
                          in_user_ids=user_ids,
                          in_group_ids=group_ids,
                          private_role=private_role )
        # Get the role object for later tests
        global role_three
        role_three = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()
        assert role_three is not None, 'Problem retrieving role named "Role Three" from the database'
        # Associate the role with a user: resubmit the user's current non-private
        # roles plus role_three ( the form replaces, not appends, associations ).
        sa_session.refresh( admin_user )
        role_ids = []
        for ura in admin_user.non_private_roles:
            role_ids.append( str( ura.role_id ) )
        role_ids.append( str( role_three.id ) )
        group_ids = []
        for uga in admin_user.groups:
            group_ids.append( str( uga.group_id ) )
        check_str = "User '%s' has been updated with %d associated roles and %d associated groups" % ( admin_user.email, len( role_ids ), len( group_ids ) )
        self.associate_roles_and_groups_with_user( self.security.encode_id( admin_user.id ),
                                                   str( admin_user.email ),
                                                   in_role_ids=role_ids,
                                                   in_group_ids=group_ids,
                                                   check_str=check_str )
        sa_session.refresh( admin_user )
        # admin_user should now be associated with 4 roles: private, role_one, role_two, role_three
        if len( admin_user.roles ) != 4:
            raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 4 )' % ( len( admin_user.roles ), admin_user.email ) )
    def test_070_create_library( self ):
        """Testing creating a new library, then renaming it"""
        name = "Library One's Name"
        description = "This is Library One's description"
        self.create_library( name=name, description=description )
        self.visit_page( 'library_admin/browse_libraries' )
        self.check_page_for_string( name )
        self.check_page_for_string( description )
        # Get the library object for later tests
        global library_one
        library_one = sa_session.query( galaxy.model.Library ) \
                                .filter( and_( galaxy.model.Library.table.c.name==name,
                                               galaxy.model.Library.table.c.description==description,
                                               galaxy.model.Library.table.c.deleted==False ) ) \
                                .first()
        assert library_one is not None, 'Problem retrieving library named "%s" from the database' % name
        # Set permissions on the library, sort for later testing
        permissions_in = [ k for k, v in galaxy.model.Library.permitted_actions.items() ]
        permissions_out = []
        # Role one members are: admin_user, regular_user1, regular_user3. Each of these users will be permitted to
        # LIBRARY_ADD, LIBRARY_MODIFY, LIBRARY_MANAGE for library items.
        self.set_library_permissions( str( library_one.id ), library_one.name, str( role_one.id ), permissions_in, permissions_out )
        # Rename the library
        rename = "Library One's been Renamed"
        redescription = "This is Library One's Re-described"
        self.rename_library( str( library_one.id ), library_one.name, name=rename, description=redescription )
        self.home()
        self.visit_page( 'library_admin/browse_libraries' )
        self.check_page_for_string( rename )
        self.check_page_for_string( redescription )
        # Reset the library back to the original name and description
        sa_session.refresh( library_one )
        self.rename_library( str( library_one.id ), library_one.name, name=name, description=description )
        sa_session.refresh( library_one )
    def test_075_library_template_features( self ):
        """Testing adding a template to a library, then filling in the contents"""
        # Make sure a form exists
        form_name = 'Library template Form One'
        form_desc = 'This is Form One'
        form_type = galaxy.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE
        self.create_form( name=form_name, desc=form_desc, formtype=form_type )
        global form_one
        form_one = None
        # Walk the non-deleted form definitions, newest first, to find ours.
        fdcs = sa_session.query( galaxy.model.FormDefinitionCurrent ) \
                         .filter( galaxy.model.FormDefinitionCurrent.table.c.deleted==False ) \
                         .order_by( galaxy.model.FormDefinitionCurrent.table.c.create_time.desc() )
        for fdc in fdcs:
            if form_name == fdc.latest_form.name and form_type == fdc.latest_form.type:
                form_one = fdc.latest_form
                break
        assert form_one is not None, 'Problem retrieving form named (%s) from the database' % form_name
        # Add a new information template to the library
        # NOTE(review): template_name is assigned but never used below.
        template_name = 'Library Template 1'
        self.add_library_info_template( 'library_admin',
                                        str( library_one.id ),
                                        str( form_one.id ),
                                        form_one.name )
        # Make sure the template fields are displayed on the library information page
        field_dict = form_one.fields[ 0 ]
        global form_one_field_label
        form_one_field_label = '%s' % str( field_dict.get( 'label', 'Field 0' ) )
        global form_one_field_help
        form_one_field_help = '%s' % str( field_dict.get( 'helptext', 'Field 0 help' ) )
        global form_one_field_required
        form_one_field_required = '%s' % str( field_dict.get( 'required', 'optional' ) ).capitalize()
        # Add information to the library using the template
        global form_one_field_name
        form_one_field_name = 'field_0'
        contents = '%s library contents' % form_one_field_label
        self.visit_url( '%s/library_admin/library?obj_id=%s&information=True' % ( self.url, str( library_one.id ) ) )
        # There are 2 forms on this page and the template is the 2nd form
        tc.fv( '2', form_one_field_name, contents )
        tc.submit( 'edit_info_button' )
        # For some reason, the following check:
        # self.check_page_for_string ( 'The information has been updated.' )
        # ...throws the following exception - I have no idea why!
        # TypeError: 'str' object is not callable
        # The work-around is to not make ANY self.check_page_for_string() calls until the next method
    def test_080_edit_template_contents_admin_view( self ):
        """Test editing template contents on the admin side"""
        # First make sure the template contents from the previous method were correctly saved
        contents = '%s library contents' % form_one_field_label
        contents_edited = contents + ' edited'
        self.visit_url( '%s/library_admin/library?obj_id=%s&information=True' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( contents )
        # Edit the contents and then save them
        # There are 2 forms on this page and the template is the 2nd form
        tc.fv( '2', form_one_field_name, contents_edited )
        tc.submit( 'edit_info_button' )
        self.check_page_for_string( 'The information has been updated.' )
        self.check_page_for_string( contents_edited )
    def test_085_add_public_dataset_to_root_folder( self ):
        """Testing adding a public dataset to the root folder, making sure library template is inherited"""
        # Expected ldda permissions: every library permitted action, inherited from the library.
        actions = [ v.action for k, v in galaxy.model.Library.permitted_actions.items() ]
        actions.sort()
        message = 'Testing adding a public dataset to the root folder'
        # The form_one template should be inherited to the library dataset upload form.
        template_contents = "%s contents for root folder 1.bed" % form_one_field_label
        self.add_library_dataset( 'library_admin',
                                  '1.bed',
                                  str( library_one.id ),
                                  str( library_one.root_folder.id ),
                                  library_one.root_folder.name,
                                  file_type='bed',
                                  dbkey='hg18',
                                  message=message.replace( ' ', '+' ),
                                  root=True,
                                  template_field_name1=form_one_field_name,
                                  template_field_contents1=template_contents )
        global ldda_one
        # The most recently created ldda should be the one just uploaded.
        ldda_one = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                             .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
                             .first()
        assert ldda_one is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_one from the database'
        self.home()
        self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( "1.bed" )
        self.check_page_for_string( message )
        self.check_page_for_string( admin_user.email )
        # Make sure the library permissions were inherited to the library_dataset_dataset_association
        ldda_permissions = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociationPermissions ) \
                                     .filter( galaxy.model.LibraryDatasetDatasetAssociationPermissions.table.c.library_dataset_dataset_association_id == ldda_one.id ) \
                                     .all()
        ldda_permissions = [ lddap_obj.action for lddap_obj in ldda_permissions ]
        ldda_permissions.sort()
        assert actions == ldda_permissions, "Permissions for ldda id %s not correctly inherited from library %s" \
            % ( ldda_one.id, library_one.name )
        # Make sure DatasetPermissions are correct - default is 'manage permissions'
        if len( ldda_one.dataset.actions ) > 1:
            raise AssertionError( '%d DatasetPermissionss were created for dataset id %d when it was created ( should have been 1 )' \
                                  % ( len( ldda_one.dataset.actions ), ldda_one.dataset.id ) )
        dp = sa_session.query( galaxy.model.DatasetPermissions ).filter( galaxy.model.DatasetPermissions.table.c.dataset_id==ldda_one.dataset.id ).first()
        if not dp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DatasetPermissions.action for dataset id %d is "%s", but it should be "manage permissions"' \
                                  % ( ldda_one.dataset.id, dp.action ) )
        # Make sure the library template contents were correctly saved
        self.home()
        self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
                        ( self.url, str( library_one.id ), str( library_one.root_folder.id ), str( ldda_one.id ) ) )
        self.check_page_for_string( template_contents )
        # Make sure other users can access the dataset from the Libraries view
        self.logout()
        self.login( email=regular_user2.email )
        self.home()
        self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( "1.bed" )
        self.logout()
        # Restore the admin session for the following tests.
        self.login( email=admin_user.email )
        self.home()
def test_090_add_new_folder_to_root_folder( self ):
"""Testing adding a folder to a library root folder"""
root_folder = library_one.root_folder
name = "Root Folder's Folder One"
description = "This is the root folder's Folder One"
self.add_folder( 'library_admin',
str( library_one.id ),
str( root_folder.id ),
name=name,
description=description )
global folder_one
folder_one = sa_session.query( galaxy.model.LibraryFolder ) \
.filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==root_folder.id,
galaxy.model.LibraryFolder.table.c.name==name,
galaxy.model.LibraryFolder.table.c.description==description ) ) \
.first()
assert folder_one is not None, 'Problem retrieving library folder named "%s" from the database' % name
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( name )
self.check_page_for_string( description )
self.home()
self.visit_url( '%s/library_admin/folder?obj_id=%s&library_id=%s&information=True' % ( self.url, str( folder_one.id ), str( library_one.id ) ) )
# Make sure the template was inherited
self.check_page_for_string( form_one_field_name )
# Make sure the template contents were NOT inherited
contents = '%s library contents' % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Library level template contents were displayed in the folders inherited template fields"
except:
pass
# Add contents to the inherited template
template_contents = "%s contents for Folder One" % form_one_field_label
# There are 2 forms on this page and the template is the 2nd form
tc.fv( '2', form_one_field_name, template_contents )
tc.submit( 'edit_info_button' )
self.check_page_for_string( 'The information has been updated.' )
self.check_page_for_string( template_contents )
def test_095_add_subfolder_to_folder( self ):
"""Testing adding a folder to a library folder"""
name = "Folder One's Subfolder"
description = "This is the Folder One's subfolder"
self.add_folder( 'library_admin', str( library_one.id ), str( folder_one.id ), name=name, description=description )
global subfolder_one
subfolder_one = sa_session.query( galaxy.model.LibraryFolder ) \
.filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==folder_one.id,
galaxy.model.LibraryFolder.table.c.name==name,
galaxy.model.LibraryFolder.table.c.description==description ) ) \
.first()
assert subfolder_one is not None, 'Problem retrieving library folder named "Folder Ones Subfolder" from the database'
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( name )
self.check_page_for_string( description )
self.home()
self.visit_url( '%s/library_admin/folder?obj_id=%s&library_id=%s&information=True' % ( self.url, str( subfolder_one.id ), str( library_one.id ) ) )
# Make sure the template was inherited
self.check_page_for_string( form_one_field_name )
# Make sure the template contents were NOT inherited
contents = "%s contents for Folder One" % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder level template contents were displayed in the sub-folders inherited template fields"
except:
pass
# Add contents to the inherited template
template_contents = "%s contents for Folder One's Subfolder" % form_one_field_label
# There are 2 forms on this page and the template is the 2nd form
tc.fv( '2', form_one_field_name, template_contents )
tc.submit( 'edit_info_button' )
self.check_page_for_string( 'The information has been updated.' )
self.check_page_for_string( template_contents )
def test_100_add_2nd_new_folder_to_root_folder( self ):
"""Testing adding a 2nd folder to a library root folder"""
root_folder = library_one.root_folder
name = "Folder Two"
description = "This is the root folder's Folder Two"
self.add_folder( 'library_admin', str( library_one.id ), str( root_folder.id ), name=name, description=description )
global folder_two
folder_two = sa_session.query( galaxy.model.LibraryFolder ) \
.filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==root_folder.id,
galaxy.model.LibraryFolder.table.c.name==name,
galaxy.model.LibraryFolder.table.c.description==description ) ) \
.first()
assert folder_two is not None, 'Problem retrieving library folder named "%s" from the database' % name
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( name )
self.check_page_for_string( description )
self.home()
self.visit_url( '%s/library_admin/folder?obj_id=%s&library_id=%s&information=True' % ( self.url, str( subfolder_one.id ), str( library_one.id ) ) )
# Make sure the template was inherited
self.check_page_for_string( form_one_field_name )
# Make sure the template contents were NOT inherited
contents = '%s library contents' % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder level template contents were displayed in the sub-folders inherited template fields"
except:
pass
def test_105_add_public_dataset_to_root_folders_2nd_subfolder( self ):
"""Testing adding a public dataset to the root folder's 2nd sub-folder"""
actions = [ v.action for k, v in galaxy.model.Library.permitted_actions.items() ]
actions.sort()
message = "Testing adding a public dataset to the folder named %s" % folder_two.name
# The form_one template should be inherited to the library dataset upload form.
template_contents = "%s contents for %s 2.bed" % ( form_one_field_label, folder_two.name )
self.add_library_dataset( 'library_admin',
'2.bed',
str( library_one.id ),
str( folder_two.id ),
folder_two.name,
file_type='bed',
dbkey='hg18',
message=message.replace( ' ', '+' ),
root=False,
template_field_name1=form_one_field_name,
template_field_contents1=template_contents )
global ldda_two
ldda_two = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_two is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_two from the database'
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( "2.bed" )
self.check_page_for_string( message )
self.check_page_for_string( admin_user.email )
# Make sure the library template contents were correctly saved
self.home()
self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( folder_two.id ), str( ldda_two.id ) ) )
self.check_page_for_string( template_contents )
def test_110_add_2nd_public_dataset_to_root_folders_2nd_subfolder( self ):
"""Testing adding a 2nd public dataset to the root folder's 2nd sub-folder"""
actions = [ v.action for k, v in galaxy.model.Library.permitted_actions.items() ]
actions.sort()
message = "Testing adding a 2nd public dataset to the folder named %s" % folder_two.name
# The form_one template should be inherited to the library dataset upload form.
template_contents = "%s contents for %s 3.bed" % ( form_one_field_label, folder_two.name )
self.add_library_dataset( 'library_admin',
'3.bed',
str( library_one.id ),
str( folder_two.id ),
folder_two.name,
file_type='bed',
dbkey='hg18',
message=message.replace( ' ', '+' ),
root=False,
template_field_name1=form_one_field_name,
template_field_contents1=template_contents )
global ldda_three
ldda_three = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_three is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_three from the database'
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( "3.bed" )
self.check_page_for_string( message )
self.check_page_for_string( admin_user.email )
# Make sure the library template contents were correctly saved
self.home()
self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( folder_two.id ), str( ldda_three.id ) ) )
self.check_page_for_string( template_contents )
    def test_115_add_dataset_with_private_role_restriction_to_folder( self ):
        """Testing adding a dataset with a private role restriction to a folder"""
        # Add a dataset restricted by the following:
        # DATASET_MANAGE_PERMISSIONS = "test@bx.psu.edu" via DefaultUserPermissions
        # DATASET_ACCESS = "regular_user1" private role via this test method
        # LIBRARY_ADD = "Role One" via inheritance from parent folder
        # LIBRARY_MODIFY = "Role One" via inheritance from parent folder
        # LIBRARY_MANAGE = "Role One" via inheritance from parent folder
        # "Role One" members are: test@bx.psu.edu, test1@bx.psu.edu, test3@bx.psu.edu
        # This means that only user test1@bx.psu.edu can see the dataset from the Libraries view
        #
        # TODO: this demonstrates a weakness in our logic: If test@bx.psu.edu cannot
        # access the dataset from the Libraries view, then the DATASET_MANAGE_PERMISSIONS
        # setting is useless if test@bx.psu.edu is not an admin. This should be corrected,
        # by displaying a warning message on the permissions form.
        message ='This is a test of the fourth dataset uploaded'
        # The form_one template should be inherited to the library dataset upload form.
        template_contents = "%s contents for %s 4.bed" % ( form_one_field_label, folder_one.name )
        self.add_library_dataset( 'library_admin',
                                  '4.bed',
                                  str( library_one.id ),
                                  str( folder_one.id ),
                                  folder_one.name,
                                  file_type='bed',
                                  dbkey='hg18',
                                  roles=[ str( regular_user1_private_role.id ) ],
                                  message=message.replace( ' ', '+' ),
                                  root=False,
                                  template_field_name1=form_one_field_name,
                                  template_field_contents1=template_contents )
        global ldda_four
        # The most recently created ldda should be the one just uploaded.
        ldda_four = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                              .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
                              .first()
        assert ldda_four is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_four from the database'
        self.home()
        self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( "4.bed" )
        self.check_page_for_string( message )
        self.check_page_for_string( admin_user.email )
        self.home()
        # Make sure the library template contents were correctly saved
        self.home()
        self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
                        ( self.url, str( library_one.id ), str( folder_one.id ), str( ldda_four.id ) ) )
        self.check_page_for_string( template_contents )
def test_120_accessing_dataset_with_private_role_restriction( self ):
"""Testing accessing a dataset with a private role restriction"""
# admin_user should not be able to see 2.bed from the analysis view's access libraries
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
try:
self.check_page_for_string( folder_one.name )
raise AssertionError( '%s can see library folder %s when it contains only datasets restricted by role %s' \
% ( admin_user.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
try:
self.check_page_for_string( '4.bed' )
raise AssertionError( '%s can see dataset 4.bed in library folder %s when it was restricted by role %s' \
% ( admin_user.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
self.logout()
# regular_user1 should be able to see 4.bed from the analysis view's access librarys
# since it was associated with regular_user1's private role
self.login( email='test1@bx.psu.edu' )
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( folder_one.name )
self.check_page_for_string( '4.bed' )
self.logout()
# regular_user2 should not be able to see 1.bed from the analysis view's access librarys
self.login( email='test2@bx.psu.edu' )
try:
self.check_page_for_string( folder_one.name )
raise AssertionError( '%s can see library folder %s when it contains only datasets restricted by role %s' \
% ( regular_user2.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
try:
self.check_page_for_string( '4.bed' )
raise AssertionError( '%s can see dataset 4.bed in library folder %s when it was restricted by role %s' \
% ( regular_user2.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
self.logout()
# regular_user3 should not be able to see 2.bed from the analysis view's access librarys
self.login( email='test3@bx.psu.edu' )
try:
self.check_page_for_string( folder_one.name )
raise AssertionError( '%s can see library folder %s when it contains only datasets restricted by role %s' \
% ( regular_user3.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
try:
self.check_page_for_string( '4.bed' )
raise AssertionError( '%s can see dataset 4.bed in library folder %s when it was restricted by role %s' \
% ( regular_user3.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass # This is the behavior we want
self.logout()
self.login( email=admin_user.email )
self.home()
    def test_125_change_dataset_access_permission( self ):
        """Testing changing the access permission on a dataset with a private role restriction"""
        # We need admin_user to be able to access 4.bed ( ldda_four )
        permissions_in = [ k for k, v in galaxy.model.Dataset.permitted_actions.items() ] + \
                         [ k for k, v in galaxy.model.Library.permitted_actions.items() ]
        permissions_out = []
        # Grant all of the above permissions to both Role One and admin_user's private role.
        role_ids_str = '%s,%s' % ( str( role_one.id ), str( admin_user_private_role.id ) )
        self.set_library_dataset_permissions( str( library_one.id ), str( folder_one.id ), str( ldda_four.id ), ldda_four.name,
                                              role_ids_str, permissions_in, permissions_out )
        # admin_user should now be able to see 4.bed from the analysis view's access libraries
        self.home()
        self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( ldda_four.name )
        self.home()
    def test_130_add_dataset_with_role_associated_with_group_and_users( self ):
        """Testing adding a dataset with a role that is associated with a group and users"""
        self.login( email='test@bx.psu.edu' )
        # Add a dataset restricted by role_two, which is currently associated as follows:
        # groups: group_two
        # users: test@bx.psu.edu, test1@bx.psu.edu via group_two
        message = 'Testing adding a dataset with a role that is associated with a group and users'
        # The form_one template should be inherited to the library dataset upload form.
        template_contents = "%s contents for %s 5.bed" % ( form_one_field_label, folder_one.name )
        self.add_library_dataset( 'library_admin',
                                  '5.bed',
                                  str( library_one.id ),
                                  str( folder_one.id ),
                                  folder_one.name,
                                  file_type='bed',
                                  dbkey='hg17',
                                  roles=[ str( role_two.id ) ],
                                  message=message.replace( ' ', '+' ),
                                  root=False,
                                  template_field_name1=form_one_field_name,
                                  template_field_contents1=template_contents )
        global ldda_five
        # The most recently created ldda should be the one just uploaded.
        ldda_five = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                              .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
                              .first()
        assert ldda_five is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_five from the database'
        self.home()
        self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( "5.bed" )
        self.check_page_for_string( message )
        self.check_page_for_string( admin_user.email )
        self.home()
        # Make sure the library template contents were correctly saved
        self.home()
        self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
                        ( self.url, str( library_one.id ), str( folder_one.id ), str( ldda_five.id ) ) )
        self.check_page_for_string( template_contents )
def test_135_accessing_dataset_with_role_associated_with_group_and_users( self ):
"""Testing accessing a dataset with a role that is associated with a group and users"""
# admin_user should be able to see 5.bed since she is associated with role_two
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( "5.bed" )
self.check_page_for_string( admin_user.email )
self.logout()
# regular_user1 should be able to see 5.bed since she is associated with group_two
self.login( email = 'test1@bx.psu.edu' )
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( folder_one.name )
self.check_page_for_string( '5.bed' )
self.check_page_for_string( admin_user.email )
# Check the permissions on the dataset 5.bed - they are as folows:
# DATASET_MANAGE_PERMISSIONS = test@bx.psu.edu
# DATASET_ACCESS = Role Two
# Role Two associations: test@bx.psu.edu and Group Two
# Group Two members: Role One, Role Two, test1@bx.psu.edu
# Role One associations: test@bx.psu.edu, test1@bx.psu.edu, test3@bx.psu.edu
# LIBRARY_ADD = Role One
# Role One aassociations: test@bx.psu.edu, test1@bx.psu.edu, test3@bx.psu.edu
# LIBRARY_MODIFY = Role One
# Role One aassociations: test@bx.psu.edu, test1@bx.psu.edu, test3@bx.psu.edu
# LIBRARY_MANAGE = Role One
# Role One aassociations: test@bx.psu.edu, test1@bx.psu.edu, test3@bx.psu.edu
self.home()
self.visit_url( '%s/library/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s' \
% ( self.url, str( library_one.id ), str( folder_one.id ), str( ldda_five.id ) ) )
self.check_page_for_string( '5.bed' )
self.check_page_for_string( 'This is the latest version of this library dataset' )
# Current user test1@bx.psu.edu has Role One, which has the LIBRARY_MODIFY permission
self.check_page_for_string( 'Edit attributes of 5.bed' )
self.home()
# Test importing the restricted dataset into a history, can't use the
# ~/library_admin/libraries form as twill barfs on it so we'll simulate the form submission
# by going directly to the form action
self.visit_url( '%s/library/datasets?do_action=add&ldda_ids=%d&library_id=%s' \
% ( self.url, ldda_five.id, str( library_one.id ) ) )
self.check_page_for_string( '1 dataset(s) have been imported into your history' )
self.logout()
# regular_user2 should not be able to see 5.bed
self.login( email = 'test2@bx.psu.edu' )
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
try:
self.check_page_for_string( folder_one.name )
raise AssertionError( '%s can see library folder %s when it contains only datasets restricted by role %s' \
% ( regular_user2.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
try:
self.check_page_for_string( '5.bed' )
raise AssertionError( '%s can see dataset 5.bed in library folder %s when it was restricted by role %s' \
% ( regular_user2.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
# regular_user3 should not be able to see folder_one ( even though it does not contain any datasets that she
# can access ) since she has Role One, and Role One has all library permissions ( see above ).
self.login( email = 'test3@bx.psu.edu' )
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( folder_one.name )
# regular_user3 should not be able to see 5.bed since users must have every role associated
# with the dataset in order to access it, and regular_user3 isnot associated with Role Two
try:
self.check_page_for_string( '5.bed' )
raise AssertionError( '%s can see dataset 5.bed in library folder %s when it was restricted by role %s' \
% ( regular_user3.email, folder_one.name, regular_user1_private_role.description ) )
except:
pass
self.logout()
self.login( email='test@bx.psu.edu' )
def test_140_copy_dataset_from_history_to_subfolder( self ):
"""Testing copying a dataset from the current history to a subfolder"""
self.new_history()
self.upload_file( "6.bed" )
latest_hda = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
.first()
self.add_history_datasets_to_library( str( library_one.id ), str( subfolder_one.id ), subfolder_one.name, str( latest_hda.id ), root=False )
# Test for DatasetPermissionss, the default setting is "manage permissions"
last_dataset_created = sa_session.query( galaxy.model.Dataset ) \
.order_by( desc( galaxy.model.Dataset.table.c.create_time ) ) \
.first()
dps = sa_session.query( galaxy.model.DatasetPermissions ) \
.filter( galaxy.model.DatasetPermissions.table.c.dataset_id==last_dataset_created.id ) \
.all()
if not dps:
raise AssertionError( 'No DatasetPermissionss created for dataset id: %d' % last_dataset_created.id )
if len( dps ) > 1:
raise AssertionError( 'More than 1 DatasetPermissionss created for dataset id: %d' % last_dataset_created.id )
for dp in dps:
if not dp.action == 'manage permissions':
raise AssertionError( 'DatasetPermissions.action "%s" is not the DefaultHistoryPermission setting of "manage permissions"' \
% str( dp.action ) )
global ldda_six
ldda_six = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_six is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_six from the database'
self.home()
# Make sure the correct template was inherited
self.visit_url( '%s/library/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s' \
% ( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six.id ) ) )
self.check_page_for_string( form_one_field_name )
# Make sure the template contents were NOT inherited
contents = "%s contents for Folder One's Subfolder" % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder template contents were displayed in the sub-folders inherited template fields"
except:
pass
def test_145_editing_dataset_attribute_info( self ):
"""Testing editing a datasets attribute information"""
new_ldda_name = '6.bed ( version 1 )'
self.edit_ldda_attribute_info( str( library_one.id ), str( subfolder_one.id ), str( ldda_six.id ), ldda_six.name, new_ldda_name )
self.home()
sa_session.refresh( ldda_six )
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( ldda_six.name )
self.home()
# Make sure the template contents were NOT inherited
self.visit_url( '%s/library/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s' \
% ( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six.id ) ) )
self.check_page_for_string( form_one_field_name )
contents = "%s contents for Folder One's Subfolder" % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder template contents were displayed in the sub-folders inherited template fields"
except:
pass
def test_150_uploading_new_dataset_version( self ):
"""Testing uploading a new version of a library dataset"""
message = 'Testing uploading a new version of a dataset'
# The form_one template should be inherited to the library dataset upload form.
template_contents = "%s contents for %s new version of 6.bed" % ( form_one_field_label, folder_one.name )
self.upload_new_dataset_version( '6.bed',
str( library_one.id ),
str( subfolder_one.id ),
str( subfolder_one.name ),
str( ldda_six.library_dataset.id ),
ldda_six.name,
file_type='auto',
dbkey='hg18',
message=message.replace( ' ', '+' ),
template_field_name1=form_one_field_name,
template_field_contents1=template_contents )
global ldda_six_version_two
ldda_six_version_two = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_six_version_two is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_six_version_two from the database'
self.home()
self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six_version_two.id ) ) )
self.check_page_for_string( 'This is the latest version of this library dataset' )
# Make sure the correct template was inherited
self.check_page_for_string( template_contents )
# Make sure it does not include any inherited contents
contents = "%s contents for Folder One's Subfolder" % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder template contents were displayed in the sub-folders inherited template fields"
except:
pass
# There are 4 forms on this page and the template is the 4th form
tc.fv( '4', form_one_field_name, template_contents )
tc.submit( 'edit_info_button' )
self.check_page_for_string( 'The information has been updated.' )
self.check_page_for_string( template_contents )
# Make sure the permissions are the same
sa_session.refresh( ldda_six )
if len( ldda_six.actions ) != len( ldda_six_version_two.actions ):
raise AssertionError( 'ldda "%s" actions "%s" != ldda "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.actions ),
ldda_six_version_two.name, str( ldda_six_version_two.actions ) ) )
if len( ldda_six.library_dataset.actions ) != len( ldda_six_version_two.library_dataset.actions ):
raise AssertionError( 'ldda.library_dataset "%s" actions "%s" != ldda.library_dataset "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.library_dataset.actions ), ldda_six_version_two.name, str( ldda_six_version_two.library_dataset.actions ) ) )
if len( ldda_six.dataset.actions ) != len( ldda_six_version_two.dataset.actions ):
raise AssertionError( 'ldda.dataset "%s" actions "%s" != ldda.dataset "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.dataset.actions ), ldda_six_version_two.name, str( ldda_six_version_two.dataset.actions ) ) )
# Check the previous version
self.visit_url( "%s/library_admin/ldda_display_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six.id ) ) )
self.check_page_for_string( 'This is an expired version of this library dataset' )
self.home()
# Make sure ldda_six is no longer displayed in the library
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
try:
self.check_page_for_string( ldda_six.name )
raise AssertionError, "Old version of library dataset %s is displayed in library" % ldda_six.name
except:
pass
self.home()
message = 'Testing uploading a new version of a library dataset'
# The form_one template should be inherited to the library dataset upload form.
template_contents = "%s contents for %s 5th new version of 6.bed" % ( form_one_field_label, folder_one.name )
sa_session.refresh( ldda_six_version_two )
self.upload_new_dataset_version( '6.bed',
str( library_one.id ),
str( subfolder_one.id ),
str( subfolder_one.name ),
str( ldda_six_version_two.library_dataset.id ),
ldda_six_version_two.name,
file_type='auto',
dbkey='hg18',
message=message.replace( ' ', '+' ),
template_field_name1=form_one_field_name,
template_field_contents1=template_contents )
global ldda_six_version_five
ldda_six_version_five = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_six_version_five is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_six_version_five from the database'
self.home()
self.visit_url( "%s/library_admin/ldda_edit_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six_version_five.id ) ) )
self.check_page_for_string( 'This is the latest version of this library dataset' )
# Make sure the correct template was inherited
self.check_page_for_string( template_contents )
# Make sure it does not include any inherited contents
contents = "%s contents for Folder One's Subfolder" % form_one_field_label
try:
self.check_page_for_string( contents )
raise AssertionError, "Parent folder template contents were displayed in the sub-folders inherited template fields"
except:
pass
# There are 4 forms on this page and the template is the 4th form
tc.fv( '4', form_one_field_name, template_contents )
tc.submit( 'edit_info_button' )
self.check_page_for_string( 'The information has been updated.' )
self.check_page_for_string( template_contents )
self.visit_url( "%s/library_admin/ldda_display_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six_version_five.id ) ) )
check_str = 'Expired versions of %s' % ldda_six_version_five.name
self.check_page_for_string( check_str )
self.check_page_for_string( ldda_six.name )
self.home()
# Make sure the permissions are the same
sa_session.refresh( ldda_six )
if len( ldda_six.actions ) != len( ldda_six_version_five.actions ):
raise AssertionError( 'ldda "%s" actions "%s" != ldda "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.actions ),
ldda_six_version_five.name, str( ldda_six_version_five.actions ) ) )
if len( ldda_six.library_dataset.actions ) != len( ldda_six_version_five.library_dataset.actions ):
raise AssertionError( 'ldda.library_dataset "%s" actions "%s" != ldda.library_dataset "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.library_dataset.actions ), ldda_six_version_five.name, str( ldda_six_version_five.library_dataset.actions ) ) )
if len( ldda_six.dataset.actions ) != len( ldda_six_version_five.dataset.actions ):
raise AssertionError( 'ldda.dataset "%s" actions "%s" != ldda.dataset "%s" actions "%s"' \
% ( ldda_six.name, str( ldda_six.dataset.actions ), ldda_six_version_five.name, str( ldda_six_version_five.dataset.actions ) ) )
# Check the previous version
self.visit_url( "%s/library_admin/ldda_display_info?library_id=%s&folder_id=%s&obj_id=%s" % \
( self.url, str( library_one.id ), str( subfolder_one.id ), str( ldda_six_version_two.id ) ) )
self.check_page_for_string( 'This is an expired version of this library dataset' )
self.home()
def test_155_upload_directory_of_files_from_admin_view( self ):
"""Testing uploading a directory of files to a root folder from the Admin view"""
message = 'This is a test for uploading a directory of files'
template_contents = "%s contents for directory of 3 datasets in %s" % ( form_one_field_label, folder_one.name )
roles_tuple = [ ( str( role_one.id ), role_one.name ) ]
check_str = "Added 3 datasets to the library '%s' ( each is selected )." % library_one.root_folder.name
self.add_dir_of_files_from_admin_view( str( library_one.id ),
str( library_one.root_folder.id ),
roles_tuple=roles_tuple,
message=message.replace( '+', ' ' ),
template_field_name1=form_one_field_name,
template_field_contents1=template_contents )
self.home()
self.visit_page( 'library_admin/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
self.check_page_for_string( admin_user.email )
self.check_page_for_string( message )
self.home()
def test_160_change_permissions_on_datasets_uploaded_from_library_dir( self ):
"""Testing changing the permissions on datasets uploaded from a directory"""
# It would be nice if twill functioned such that the above test resulted in a
# form with the uploaded datasets selected, but it does not ( they're not checked ),
# so we'll have to simulate this behavior ( not ideal ) for the 'edit' action. We
# first need to get the ldda.id for the 3 new datasets
latest_3_lddas = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.update_time ) ) \
.limit( 3 )
ldda_ids = ''
for ldda in latest_3_lddas:
ldda_ids += '%s,' % str( ldda.id )
ldda_ids = ldda_ids.rstrip( ',' )
permissions = [ 'DATASET_ACCESS', 'DATASET_MANAGE_PERMISSIONS' ]
def build_url( permissions, role ):
# We'll bypass the library_admin/datasets method and directly call the library_admin/dataset method, setting
# access, manage permissions, and edit metadata permissions to role_one
url = '/library_admin/ldda_manage_permissions?obj_id=%s&library_id=%s&folder_id=%s&update_roles_button=Save' % ( ldda_ids, str( library_one.id ), str( folder_one.id ) )
for p in permissions:
url += '&%s_in=%s' % ( p, str( role.id ) )
return url
url = build_url( permissions, role_one )
self.home()
self.visit_url( url )
self.check_page_for_string( 'Permissions have been updated on 3 datasets' )
def check_edit_page1( lddas ):
# Make sure the permissions have been correctly updated for the 3 datasets. Permissions should
# be all of the above on any of the 3 datasets that are imported into a history
for ldda in lddas:
# Import each library dataset into our history
self.home()
self.visit_url( '%s/library/datasets?do_action=add&ldda_ids=%s&library_id=%s' % ( self.url, str( ldda.id ), str( library_one.id ) ) )
# Determine the new HistoryDatasetAssociation id created when the library dataset was imported into our history
last_hda_created = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
.first()
self.home()
self.visit_url( '%s/root/edit?id=%s' % ( self.url, str( last_hda_created.id ) ) )
self.check_page_for_string( 'Edit Attributes' )
self.check_page_for_string( last_hda_created.name )
check_str = 'Manage dataset permissions and role associations of %s' % last_hda_created.name
self.check_page_for_string( check_str )
self.check_page_for_string( 'Role members can manage the roles associated with this dataset' )
self.check_page_for_string( 'Role members can import this dataset into their history for analysis' )
# admin_user is associated with role_one, so should have all permissions on imported datasets
check_edit_page1( latest_3_lddas )
self.logout()
# regular_user1 is associated with role_one, so should have all permissions on imported datasets
self.login( email='test1@bx.psu.edu' )
check_edit_page1( latest_3_lddas )
self.logout()
# Since regular_user2 is not associated with role_one, she should not have
# access to any of the 3 datasets, so she will not see folder_one on the libraries page
self.login( email='test2@bx.psu.edu' )
self.home()
self.visit_url( '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
try:
self.check_page_for_string( folder_one.name )
raise AssertionError( '%s can access folder %s even though all contained datasets should be restricted from access by her' \
% ( regular_user2.email, folder_one.name ) )
except:
pass # This is the behavior we want
self.logout()
# regular_user3 is associated with role_one, so should have all permissions on imported datasets
self.login( email='test3@bx.psu.edu' )
check_edit_page1( latest_3_lddas )
self.logout()
self.login( email='test@bx.psu.edu' )
# Change the permissions and test again
permissions = [ 'DATASET_ACCESS' ]
url = build_url( permissions, role_one )
self.home()
self.visit_url( url )
self.check_page_for_string( 'Permissions have been updated on 3 datasets' )
def check_edit_page2( lddas ):
# Make sure the permissions have been correctly updated for the 3 datasets. Permissions should
# be all of the above on any of the 3 datasets that are imported into a history
for ldda in lddas:
self.home()
self.visit_url( '%s/library/datasets?library_id=%s&do_action=add&ldda_ids=%s' % ( self.url, str( library_one.id ), str( ldda.id ) ) )
# Determine the new HistoryDatasetAssociation id created when the library dataset was imported into our history
last_hda_created = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
.first()
self.home()
self.visit_url( '%s/root/edit?id=%s' % ( self.url, str( last_hda_created.id ) ) )
self.check_page_for_string( 'Edit Attributes' )
self.check_page_for_string( last_hda_created.name )
self.check_page_for_string( 'View Permissions' )
self.check_page_for_string( last_hda_created.name )
try:
# This should no longer be possible
check_str = 'Manage dataset permissions and role associations of %s' % last_hda_created.name
self.check_page_for_string( check_str )
raise AssertionError( '%s incorrectly has DATASET_MANAGE_PERMISSIONS on datasets imported from a library' % admin_user.email )
except:
pass
try:
# This should no longer be possible
self.check_page_for_string( 'Role members can manage the roles associated with this dataset' )
raise AssertionError( '%s incorrectly has DATASET_MANAGE_PERMISSIONS on datasets imported from a library' % admin_user.email )
except:
pass
try:
# This should no longer be possible
self.check_page_for_string( 'Role members can import this dataset into their history for analysis' )
raise AssertionError( '%s incorrectly has DATASET_MANAGE_PERMISSIONS on datasets imported from a library' % admin_user.email )
except:
pass
check_edit_page2( latest_3_lddas )
self.home()
def test_165_upload_directory_of_files_from_libraries_view( self ):
"""Testing uploading a directory of files to a root folder from the Data Libraries view"""
# admin_user will not have the option sto upload a directory of files from the
# Libraries view since a sub-directory named the same as their email is not contained
# in the configured user_library_import_dir. However, since members of role_one have
# the LIBRARY_ADD permission, we can test this feature as regular_user1 or regular_user3
self.logout()
self.login( email=regular_user1.email )
message = 'Uploaded all files in test-data/users/test1...'
# Since regular_user1 does not have any sub-directories contained within her configured
# user_library_import_dir, the only option in her server_dir select list will be the
# directory named the same as her email
check_str_after_submit = "Added 1 datasets to the library '%s' ( each is selected )." % library_one.root_folder.name
self.add_dir_of_files_from_libraries_view( str( library_one.id ),
str( library_one.root_folder.id ),
regular_user1.email,
check_str_after_submit=check_str_after_submit,
message=message.replace( '+', ' ' ) )
self.home()
self.visit_page( 'library/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
self.check_page_for_string( regular_user1.email )
self.check_page_for_string( message )
self.logout()
self.login( regular_user3.email )
message = 'Uploaded all files in test-data/users/test3.../run1'
# Since regular_user2 has a subdirectory contained within her configured user_library_import_dir,
# she will have a "None" option in her server_dir select list
check_str1 = '<option>None</option>'
self.add_dir_of_files_from_libraries_view( str( library_one.id ),
str( library_one.root_folder.id ),
'run1',
check_str_after_submit=check_str_after_submit,
message=message.replace( '+', ' ' ) )
self.home()
self.visit_page( 'library/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
self.check_page_for_string( regular_user3.email )
self.check_page_for_string( message )
self.home()
self.logout()
self.login( email=admin_user.email )
def test_167_download_archive_of_library_files( self ):
"""Testing downloading an archive of files from the library"""
for format in ( 'tbz', 'tgz', 'zip' ):
archive = self.download_archive_of_library_files( str( library_one.id ),
( str( ldda_one.id ), str( ldda_two.id ) ),
format )
self.check_archive_contents( archive, ( ldda_one, ldda_two ) )
os.remove( archive )
def test_170_mark_group_deleted( self ):
"""Testing marking a group as deleted"""
# Logged in as admin_user
self.home()
self.visit_url( '%s/admin/groups' % self.url )
self.check_page_for_string( group_two.name )
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
sa_session.refresh( group_two )
if not group_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % group_two.name )
# Deleting a group should not delete any associations
if not group_two.members:
raise AssertionError( '%s incorrectly lost all members when it was marked as deleted.' % group_two.name )
if not group_two.roles:
raise AssertionError( '%s incorrectly lost all role associations when it was marked as deleted.' % group_two.name )
def test_175_undelete_group( self ):
"""Testing undeleting a deleted group"""
# Logged in as admin_user
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
sa_session.refresh( group_two )
if group_two.deleted:
raise AssertionError( '%s was not correctly marked as not deleted.' % group_two.name )
def test_180_mark_role_deleted( self ):
"""Testing marking a role as deleted"""
# Logged in as admin_user
self.home()
self.visit_url( '%s/admin/roles' % self.url )
self.check_page_for_string( role_two.name )
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
sa_session.refresh( role_two )
if not role_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % role_two.name )
# Deleting a role should not delete any associations
if not role_two.users:
raise AssertionError( '%s incorrectly lost all user associations when it was marked as deleted.' % role_two.name )
if not role_two.groups:
raise AssertionError( '%s incorrectly lost all group associations when it was marked as deleted.' % role_two.name )
def test_185_undelete_role( self ):
"""Testing undeleting a deleted role"""
# Logged in as admin_user
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
def test_190_mark_dataset_deleted( self ):
"""Testing marking a library dataset as deleted"""
# Logged in as admin_user
self.home()
self.delete_library_item( str( library_one.id ), str( ldda_two.library_dataset.id ), ldda_two.name, library_item_type='library_dataset' )
self.home()
self.visit_page( 'library_admin/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
try:
# 2.bed was only contained in the library in 1 place, so it should no longer display
self.check_page_for_string( ldda_two.name )
raise AssertionError( "Dataset '%s' is incorrectly displayed in the library after it has been deleted." % ldda_two.name )
except:
pass
self.home()
def test_195_display_deleted_dataset( self ):
"""Testing displaying deleted dataset"""
# Logged in as admin_user
self.home()
self.visit_url( "%s/library_admin/browse_library?obj_id=%s&show_deleted=True" % ( self.url, str( library_one.id ) ) )
self.check_page_for_string( ldda_two.name )
self.home()
def test_200_hide_deleted_dataset( self ):
"""Testing hiding deleted dataset"""
# Logged in as admin_user
self.home()
self.visit_url( "%s/library_admin/browse_library?obj_id=%s&show_deleted=False" % ( self.url, str( library_one.id ) ) )
try:
self.check_page_for_string( ldda_two.name )
raise AssertionError( "Dataset '%s' is incorrectly displayed in the library after it has been deleted." % ldda_two.name )
except:
pass
self.home()
def test_205_mark_folder_deleted( self ):
"""Testing marking a library folder as deleted"""
# Logged in as admin_user
self.home()
self.delete_library_item( str( library_one.id ), str( folder_two.id ), folder_two.name, library_item_type='folder' )
self.home()
self.visit_page( 'library_admin/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
try:
self.check_page_for_string( folder_two.name )
raise AssertionError( "Folder '%s' is incorrectly displayed in the library after it has been deleted." % folder_two.name )
except:
pass
self.home()
def test_210_mark_folder_undeleted( self ):
"""Testing marking a library folder as undeleted"""
# Logged in as admin_user
self.home()
self.undelete_library_item( str( library_one.id ), str( folder_two.id ), folder_two.name, library_item_type='folder' )
self.home()
self.visit_page( 'library_admin/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
self.check_page_for_string( folder_two.name )
try:
# 2.bed was deleted before the folder was deleted, so state should have been saved. In order
# fro 2.bed to be displayed, it would itself have to be marked undeleted.
self.check_page_for_string( ldda_two.name )
raise AssertionError( "Dataset '%s' is incorrectly displayed in the library after parent folder was undeleted." % ldda_two.name )
except:
pass
self.home()
def test_215_mark_library_deleted( self ):
"""Testing marking a library as deleted"""
# Logged in as admin_user
self.home()
# First mark folder_two as deleted to further test state saving when we undelete the library
self.delete_library_item( str( library_one.id ), str( folder_two.id ), folder_two.name, library_item_type='folder' )
self.delete_library_item( str( library_one.id ), str( library_one.id ), library_one.name, library_item_type='library' )
self.home()
self.visit_page( 'library_admin/deleted_libraries' )
self.check_page_for_string( library_one.name )
self.home()
def test_220_mark_library_undeleted( self ):
"""Testing marking a library as undeleted"""
# Logged in as admin_user
self.home()
self.undelete_library_item( str( library_one.id ), str( library_one.id ), library_one.name, library_item_type='library' )
self.home()
self.visit_page( 'library_admin/browse_library?obj_id=%s' % ( str( library_one.id ) ) )
self.check_page_for_string( library_one.name )
try:
# folder_two was marked deleted before the library was deleted, so it should not be displayed
self.check_page_for_string( folder_two.name )
raise AssertionError( "Deleted folder '%s' is incorrectly displayed in the library after the library was undeleted." % folder_two.name )
except:
pass
self.home()
def test_225_purge_user( self ):
"""Testing purging a user account"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
sa_session.refresh( regular_user3 )
self.purge_user( self.security.encode_id( regular_user3.id ), regular_user3.email )
sa_session.refresh( regular_user3 )
if not regular_user3.purged:
raise AssertionError( 'User %s was not marked as purged.' % regular_user3.email )
# Make sure DefaultUserPermissions deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.default_permissions ) != 1:
raise AssertionError( 'DefaultUserPermissions for user %s were not deleted.' % regular_user3.email )
for dup in regular_user3.default_permissions:
role = sa_session.query( galaxy.model.Role ).get( dup.role_id )
if role.type != 'private':
raise AssertionError( 'DefaultUserPermissions for user %s are not related with the private role.' % regular_user3.email )
# Make sure History deleted
for history in regular_user3.histories:
sa_session.refresh( history )
if not history.deleted:
raise AssertionError( 'User %s has active history id %d after their account was marked as purged.' % ( regular_user3.email, hda.id ) )
# NOTE: Not all hdas / datasets will be deleted at the time a history is deleted - the cleanup_datasets.py script
# is responsible for this.
# Make sure UserGroupAssociations deleted
if regular_user3.groups:
raise AssertionError( 'User %s has active group id %d after their account was marked as purged.' % ( regular_user3.email, uga.id ) )
# Make sure UserRoleAssociations deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.roles ) != 1:
raise AssertionError( 'UserRoleAssociations for user %s were not deleted.' % regular_user3.email )
for ura in regular_user3.roles:
role = sa_session.query( galaxy.model.Role ).get( ura.role_id )
if role.type != 'private':
raise AssertionError( 'UserRoleAssociations for user %s are not related with the private role.' % regular_user3.email )
def test_230_manually_unpurge_user( self ):
"""Testing manually un-purging a user account"""
# Logged in as admin_user
# Reset the user for later test runs. The user's private Role and DefaultUserPermissions for that role
# should have been preserved, so all we need to do is reset purged and deleted.
# TODO: If we decide to implement the GUI feature for un-purging a user, replace this with a method call
regular_user3.purged = False
regular_user3.deleted = False
sa_session.add( regular_user3 )
sa_session.flush()
def test_235_purge_group( self ):
"""Testing purging a group"""
# Logged in as admin_user
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
self.purge_group( self.security.encode_id( group_two.id ), group_two.name )
# Make sure there are no UserGroupAssociations
uga = sa_session.query( galaxy.model.UserGroupAssociation ) \
.filter( galaxy.model.UserGroupAssociation.table.c.group_id == group_two.id ) \
.first()
if uga:
raise AssertionError( "Purging the group did not delete the UserGroupAssociations for group_id '%s'" % group_two.id )
# Make sure there are no GroupRoleAssociations
gra = sa_session.query( galaxy.model.GroupRoleAssociation ) \
.filter( galaxy.model.GroupRoleAssociation.table.c.group_id == group_two.id ) \
.first()
if gra:
raise AssertionError( "Purging the group did not delete the GroupRoleAssociations for group_id '%s'" % group_two.id )
# Undelete the group for later test runs
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
def test_240_purge_role( self ):
"""Testing purging a role"""
# Logged in as admin_user
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
self.purge_role( self.security.encode_id( role_two.id ), role_two.name )
# Make sure there are no UserRoleAssociations
uras = sa_session.query( galaxy.model.UserRoleAssociation ) \
.filter( galaxy.model.UserRoleAssociation.table.c.role_id == role_two.id ) \
.all()
if uras:
raise AssertionError( "Purging the role did not delete the UserRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DefaultUserPermissions associated with the Role
dups = sa_session.query( galaxy.model.DefaultUserPermissions ) \
.filter( galaxy.model.DefaultUserPermissions.table.c.role_id == role_two.id ) \
.all()
if dups:
raise AssertionError( "Purging the role did not delete the DefaultUserPermissions for role_id '%s'" % role_two.id )
# Make sure there are no DefaultHistoryPermissions associated with the Role
dhps = sa_session.query( galaxy.model.DefaultHistoryPermissions ) \
.filter( galaxy.model.DefaultHistoryPermissions.table.c.role_id == role_two.id ) \
.all()
if dhps:
raise AssertionError( "Purging the role did not delete the DefaultHistoryPermissions for role_id '%s'" % role_two.id )
# Make sure there are no GroupRoleAssociations
gra = sa_session.query( galaxy.model.GroupRoleAssociation ) \
.filter( galaxy.model.GroupRoleAssociation.table.c.role_id == role_two.id ) \
.first()
if gra:
raise AssertionError( "Purging the role did not delete the GroupRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DatasetPermissionss
dp = sa_session.query( galaxy.model.DatasetPermissions ) \
.filter( galaxy.model.DatasetPermissions.table.c.role_id == role_two.id ) \
.first()
if dp:
raise AssertionError( "Purging the role did not delete the DatasetPermissionss for role_id '%s'" % role_two.id )
def test_245_manually_unpurge_role( self ):
"""Testing manually un-purging a role"""
# Logged in as admin_user
# Manually unpurge, then undelete the role for later test runs
# TODO: If we decide to implement the GUI feature for un-purging a role, replace this with a method call
role_two.purged = False
sa_session.add( role_two )
sa_session.flush()
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
    def test_250_purge_library( self ):
        """Testing purging a library"""
        # Logged in as admin_user
        self.home()
        # The library is first marked deleted, then purged.
        self.delete_library_item( str( library_one.id ), str( library_one.id ), library_one.name, library_item_type='library' )
        self.purge_library( str( library_one.id ), library_one.name )
        # Make sure the library was purged
        sa_session.refresh( library_one )
        if not ( library_one.deleted and library_one.purged ):
            raise AssertionError( 'The library id %s named "%s" has not been marked as deleted and purged.' % ( str( library_one.id ), library_one.name ) )
        # Recursively walk the folder tree, verifying flags at every level: sub-folders
        # must be marked purged, while LibraryDatasets, their LDDAs and underlying
        # Datasets need only be marked deleted ( this test only checks the flags;
        # physical removal is presumably handled elsewhere — see the cleanup_datasets.py
        # note in test_225 — TODO confirm ).
        def check_folder( library_folder ):
            for folder in library_folder.folders:
                # Refresh before reading the flag so we see the post-purge state.
                sa_session.refresh( folder )
                # Make sure all of the library_folders are purged
                if not folder.purged:
                    raise AssertionError( 'The library_folder id %s named "%s" has not been marked purged.' % ( str( folder.id ), folder.name ) )
                check_folder( folder )
            # Make sure all of the LibraryDatasets and associated objects are deleted
            sa_session.refresh( library_folder )
            for library_dataset in library_folder.datasets:
                sa_session.refresh( library_dataset )
                ldda = library_dataset.library_dataset_dataset_association
                if ldda:
                    sa_session.refresh( ldda )
                    if not ldda.deleted:
                        raise AssertionError( 'The library_dataset_dataset_association id %s named "%s" has not been marked as deleted.' % \
                                              ( str( ldda.id ), ldda.name ) )
                    # Make sure all of the datasets have been deleted
                    dataset = ldda.dataset
                    sa_session.refresh( dataset )
                    if not dataset.deleted:
                        raise AssertionError( 'The dataset with id "%s" has not been marked as deleted when it should have been.' % \
                                              str( ldda.dataset.id ) )
                if not library_dataset.deleted:
                    raise AssertionError( 'The library_dataset id %s named "%s" has not been marked as deleted.' % \
                                          ( str( library_dataset.id ), library_dataset.name ) )
        check_folder( library_one.root_folder )
def test_255_no_library_template( self ):
"""Test library features when library has no template"""
# Logged in as admin_user
name = "Library Two"
description = "This is Library Two"
# Create a library, adding no template
self.create_library( name=name, description=description )
self.visit_page( 'library_admin/browse_libraries' )
self.check_page_for_string( name )
self.check_page_for_string( description )
library_two = sa_session.query( galaxy.model.Library ) \
.filter( and_( galaxy.model.Library.table.c.name==name,
galaxy.model.Library.table.c.description==description,
galaxy.model.Library.table.c.deleted==False ) ) \
.first()
assert library_two is not None, 'Problem retrieving library named "%s" from the database' % name
# Add a dataset to the library
self.add_library_dataset( 'library_admin',
'7.bed',
str( library_two.id ),
str( library_two.root_folder.id ),
library_two.root_folder.name,
file_type='bed',
dbkey='hg18',
message='',
root=True )
ldda_seven = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
assert ldda_seven is not None, 'Problem retrieving LibraryDatasetDatasetAssociation ldda_seven from the database'
self.home()
self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_two.id ) ) )
self.check_page_for_string( "7.bed" )
self.check_page_for_string( admin_user.email )
# TODO: add a functional test to cover adding a library dataset via url_paste here...
# TODO: Add a functional test to cover checking the space_to_tab checkbox here...
# Delete and purge the library
self.home()
self.delete_library_item( str( library_two.id ), str( library_two.id ), library_two.name, library_item_type='library' )
self.purge_library( str( library_two.id ), library_two.name )
self.home()
    def test_260_library_permissions( self ):
        """Test library permissions"""
        # Logged in as admin_user
        name = "Library Three"
        description = "This is Library Three"
        # Create a library, adding no template
        self.create_library( name=name, description=description )
        self.visit_page( 'library_admin/browse_libraries' )
        self.check_page_for_string( name )
        self.check_page_for_string( description )
        # Stored as a module global so later tests ( test_265 / test_270 / test_275 )
        # can refer to the same library.
        global library_three
        library_three = sa_session.query( galaxy.model.Library ) \
                                  .filter( and_( galaxy.model.Library.table.c.name==name,
                                                 galaxy.model.Library.table.c.description==description,
                                                 galaxy.model.Library.table.c.deleted==False ) ) \
                                  .first()
        assert library_three is not None, 'Problem retrieving library named "%s" from the database' % name
        # Set library permissions for regular_user1 and regular_user2. Each of these users will be permitted to
        # LIBRARY_ADD, LIBRARY_MODIFY, LIBRARY_MANAGE for library items.
        # permissions_in grants every action in Library.permitted_actions; permissions_out revokes none.
        permissions_in = [ k for k, v in galaxy.model.Library.permitted_actions.items() ]
        permissions_out = []
        role_ids_str = '%s,%s' % ( str( regular_user1_private_role.id ), str( regular_user2_private_role.id ) )
        self.set_library_permissions( str( library_three.id ), library_three.name, role_ids_str, permissions_in, permissions_out )
        self.logout()
        # Login as regular_user1 and make sure they can see the library
        self.login( email=regular_user1.email )
        self.visit_url( '%s/library/browse_libraries' % self.url )
        self.check_page_for_string( name )
        self.logout()
        # Login as regular_user2 and make sure they can see the library
        self.login( email=regular_user2.email )
        self.visit_url( '%s/library/browse_libraries' % self.url )
        self.check_page_for_string( name )
        # Add a dataset to the library
        message = 'Testing adding 1.bed to Library Three root folder'
        self.add_library_dataset( 'library',
                                  '1.bed',
                                  str( library_three.id ),
                                  str( library_three.root_folder.id ),
                                  library_three.root_folder.name,
                                  file_type='bed',
                                  dbkey='hg18',
                                  message=message.replace( ' ', '+' ),
                                  root=True )
        # Add a folder to the library
        # NOTE: 'name' and 'description' are rebound here; from this point on they refer
        # to the folder, not the library created above.
        name = "Root Folder's Folder X"
        description = "This is the root folder's Folder X"
        self.add_folder( 'library',
                         str( library_three.id ),
                         str( library_three.root_folder.id ),
                         name=name,
                         description=description )
        # Stored as a module global so test_265 can add a dataset to this folder.
        global folder_x
        folder_x = sa_session.query( galaxy.model.LibraryFolder ) \
                             .filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==library_three.root_folder.id,
                                            galaxy.model.LibraryFolder.table.c.name==name,
                                            galaxy.model.LibraryFolder.table.c.description==description ) ) \
                             .first()
        # Add an information template to the folder
        # NOTE(review): template_name is assigned but never used below — the call passes
        # form_one.name instead. Possibly a leftover; confirm intent.
        template_name = 'Folder Template 1'
        # NOTE(review): this passes library_one.id even though folder_x belongs to
        # library_three ( and library_one was purged in test_250 ) — looks suspicious;
        # confirm against add_folder_info_template's expectations.
        self.add_folder_info_template( 'library',
                                       str( library_one.id ),
                                       str( folder_x.id ),
                                       str( form_one.id ),
                                       form_one.name )
        # Modify the folder's information
        contents = '%s folder contents' % form_one_field_label
        new_name = "Root Folder's Folder Y"
        new_description = "This is the root folder's Folder Y"
        self.edit_folder_info( 'library',
                               str( folder_x.id ),
                               str( library_three.id ),
                               name,
                               new_name,
                               new_description,
                               contents=contents,
                               field_name=form_one_field_name )
        # Twill barfs when self.check_page_for_string() is called after dealing with an information template,
        # the exception is: TypeError: 'str' object is not callable
        # the work-around it to end this method so any calls are in the next method.
    def test_265_template_features_and_permissions( self ):
        """Test library template and more permissions behavior from the Data Libraries view"""
        # Logged in as regular_user2
        # folder_x was created ( and renamed ) by test_260; refresh to pick up its current state.
        sa_session.refresh( folder_x )
        # Add a dataset to the folder
        message = 'Testing adding 2.bed to Library Three root folder'
        self.add_library_dataset( 'library',
                                  '2.bed',
                                  str( library_three.id ),
                                  str( folder_x.id ),
                                  folder_x.name,
                                  file_type='bed',
                                  dbkey='hg18',
                                  message=message.replace( ' ', '+' ),
                                  root=False )
        # Stored as a module global so test_270 can check for this dataset as another user.
        global ldda_x
        ldda_x = sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                           .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
                           .first()
        assert ldda_x is not None, 'Problem retrieving ldda_x from the database'
        # Add an information template to the library
        # NOTE(review): template_name is assigned but never used below — the call passes
        # form_one.name instead. Possibly a leftover; confirm intent.
        template_name = 'Library Template 3'
        self.add_library_info_template( 'library',
                                        str( library_three.id ),
                                        str( form_one.id ),
                                        form_one.name )
        # Add information to the library using the template
        contents = '%s library contents' % form_one_field_label
        self.visit_url( '%s/library/library?obj_id=%s&information=True' % ( self.url, str( library_three.id ) ) )
        # There are 2 forms on this page and the template is the 2nd form
        tc.fv( '2', form_one_field_name, contents )
        tc.submit( 'edit_info_button' )
        # For some reason, the following check:
        # self.check_page_for_string ( 'The information has been updated.' )
        # ...throws the following exception - I have not idea why!
        # TypeError: 'str' object is not callable
        # The work-around is to not make ANY self.check_page_for_string() calls until the next method
def test_270_permissions_as_different_regular_user( self ):
    """Test library template and more permissions behavior from the Data Libraries view as a different user"""
    # Switch sessions: end regular_user2's session and sign in as regular_user1.
    self.logout()
    self.login( email=regular_user1.email )
    # regular_user1 must be able to see the dataset uploaded by the previous test.
    browse_url = '%s/library/browse_library?obj_id=%s' % ( self.url, str( library_three.id ) )
    self.visit_url( browse_url )
    self.check_page_for_string( ldda_x.name )
def test_275_reset_data_for_later_test_runs( self ):
    """Reseting data to enable later test runs to pass"""
    # Logged in as regular_user2
    self.logout()
    self.login( email=admin_user.email )
    # Delete and purge library_three so later runs can recreate it from scratch.
    self.delete_library_item( str( library_three.id ), str( library_three.id ), library_three.name, library_item_type='library' )
    self.purge_library( str( library_three.id ), library_three.name )
    ##################
    # Eliminate all non-private roles
    ##################
    for role in [ role_one, role_two, role_three ]:
        self.mark_role_deleted( self.security.encode_id( role.id ), role.name )
        self.purge_role( self.security.encode_id( role.id ), role.name )
        # Manually delete the role from the database
        # (purging via the UI does not remove the row itself)
        sa_session.refresh( role )
        sa_session.delete( role )
        sa_session.flush()
    ##################
    # Eliminate all groups
    ##################
    for group in [ group_zero, group_one, group_two ]:
        self.mark_group_deleted( self.security.encode_id( group.id ), group.name )
        self.purge_group( self.security.encode_id( group.id ), group.name )
        # Manually delete the group from the database
        sa_session.refresh( group )
        sa_session.delete( group )
        sa_session.flush()
    ##################
    # Make sure all users are associated only with their private roles
    ##################
    for user in [ admin_user, regular_user1, regular_user2, regular_user3 ]:
        sa_session.refresh( user )
        if len( user.roles) != 1:
            raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 1 )' % ( len( user.roles ), user.email ) )
    #####################
    # Reset DefaultHistoryPermissions for regular_user1
    #####################
    self.logout()
    self.login( email=regular_user1.email )
    # Change DefaultHistoryPermissions for regular_user1 back to the default
    permissions_in = [ 'DATASET_MANAGE_PERMISSIONS' ]
    permissions_out = [ 'DATASET_ACCESS' ]
    self.user_set_default_permissions( permissions_in=permissions_in, permissions_out=permissions_out, role_id=str( regular_user1_private_role.id ) )
    self.logout()
    self.login( email=admin_user.email )
|
volpino/Yeps-EURAC
|
test/functional/test_security_and_libraries.py
|
Python
|
mit
| 122,639
|
[
"Galaxy",
"VisIt"
] |
4f35c64651a5989124d4e4aac545b925f0aa2f4139df3230db88b074f596e738
|
# -*- coding: latin-1 -*-
##############################################################################
# Copyright (C) 2012 by BSC-CNS #
# Author: Carlos Tripiana Montes <carlos.tripiana@bsc.es> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, visit the following webpage: #
# http://www.gnu.org/licenses/gpl.html #
##############################################################################
##############################################################################
# INTRUCTIONS: #
# #
# Before executing the script, make sure the state file loads properly. #
# Check file paths and options, depending which version of paraview do #
# you use. #
# Finally test the state file loading it into paraview. If this works #
# the script should work. #
##############################################################################
# Imports
import argparse
import array
import math
import shutil
from paraview.simple import *
# Constants
MAX_DIMENSIONS = 3  # highest number of vector components this script can name (X/Y/Z)

# Parse parameters
parser = argparse.ArgumentParser(description = "Process the arguments")
parser.add_argument("stateFile", help = "Path to the state file (PVSM)")
parser.add_argument("outputDir", help = "Path to the output directory (must be writable)")
args = parser.parse_args()

print "STARTING"

# Load the state
paraview.servermanager.LoadState(args.stateFile)

# Make sure the view in the state is the active one
activeView = paraview.simple.GetRenderView()
paraview.simple.SetActiveView(activeView)

# Get the list of sources
sources = paraview.simple.GetSources()

# PROLOG: Create the variables and pipeline objects we will need
rangesList = []  # one entry per *visible* source: [expression, (min, max)] or None
index = 0        # parallel index into rangesList used by the extraction loop below
# These pipeline filters are created once and re-wired (via .Input) per source.
surface = paraview.servermanager.filters.ExtractSurface(NonlinearSubdivisionLevel = 1, PieceInvariant = 1)
triangulator = paraview.servermanager.filters.Triangulate()
# FileType = 2: binary output — see the "binary STL" comments further below
writer = paraview.servermanager.writers.PSTLWriter(FileType = 2)
calculator = paraview.servermanager.filters.Calculator(AttributeMode = "point_data", CoordinateResults = 1, ReplaceInvalidResults = 0, ReplacementValue = 0.0, ResultArrayName = "POINTS")
# 1st STEP: Get the ranges' values for each visible source
# Iterate over all sources
for source in sources.items():
# If the source is in the view and it's visible
representation = paraview.servermanager.GetRepresentation(source[1], activeView)
if representation != None and representation.Visibility == 1 :
# Get the data name -> data info
dataName = representation.ColorArrayName
dataInfo = source[1].PointData[dataName]
# If the source has any kind of data associated
if dataInfo != None :
lookupTable = representation.LookupTable
newRange = []
# Check what type of mapping we should do
if lookupTable.VectorMode == "Component" :
# Select the appropriate component
if lookupTable.VectorComponent == 0 :
newRange.append(dataName + "_X")
elif lookupTable.VectorComponent == 1 :
newRange.append(dataName + "_Y")
elif lookupTable.VectorComponent == 2 :
newRange.append(dataName + "_Z")
else :
print "Skipping dimension higher than " + MAX_DIMENSIONS + " for " + source[0][0] + "_" + source[0][1] + "_" + dataName
newRange = None
# endif
if newRange != None :
# If there is a custom range we use it, otherwise we use the temporal (fixed range) max and min limits
if lookupTable.ScalarRangeInitialized and lookupTable.LockScalarRange :
newRange.append([lookupTable.RGBPoints[0], lookupTable.RGBPoints[-4] ])
else :
# Look up the input source
inputSource = source[1]
while "Input" in dir(inputSource) :
inputSource = inputSource.Input
# endwhile
# Is this file an animation or not?
if "TimestepValues" in dir(inputSource) and inputSource.TimestepValues.__str__() != "None" :
newRange.append([float("+inf"), float("-inf")])
# Iterate over the input source steps
timeSteps = inputSource.TimestepValues
for timeStep in timeSteps :
# Go to the step
source[1].UpdatePipeline(timeStep)
# Get the data info -> data range for this step
dataInfo = source[1].PointData[dataName]
# Retrieve the range for that component
tempRange = dataInfo.GetRange(lookupTable.VectorComponent)
# Find the min and max over all steps
if tempRange[0] < newRange[1][0] :
newRange[1][0] = tempRange[0]
# endif
if tempRange[1] > newRange[1][1] :
newRange[1][1] = tempRange[1]
# endif
# endfor
# endif
else :
# Default limits for this source and component
newRange.append(dataInfo.GetRange(lookupTable.VectorComponent) )
# endif
# endif
rangesList.append(newRange)
else :
rangesList.append(None)
# endif
elif lookupTable.VectorMode == "Magnitude" :
# Query the number of dimensions for this data
numDimensions = source[1].GetDataInformation().DataInformation.GetPointDataInformation().GetArrayInformation(dataName).GetNumberOfComponents()
# Select the appropriate expresion for the components
if numDimensions == 1 :
newRange.append(dataName)
elif numDimensions > 1 and numDimensions <= MAX_DIMENSIONS :
newRange.append("mag(" + dataName + ")")
else :
print "Skipping dimension higher than " + MAX_DIMENSIONS + " for " + source[0][0] + "_" + source[0][1] + "_" + dataName
newRange = None
# endif
if newRange != None :
# If there is a custom range we use it, otherwise we use the temporal (fixed range) max and min limits
if lookupTable.ScalarRangeInitialized and lookupTable.LockScalarRange :
newRange.append([lookupTable.RGBPoints[0], lookupTable.RGBPoints[-4] ])
else :
# Look up the input source
inputSource = source[1]
while "Input" in dir(inputSource) :
inputSource = inputSource.Input
# endwhile
# Is this file an animation or not?
if "TimestepValues" in dir(inputSource) and inputSource.TimestepValues.__str__() != "None" :
newRange.append([float("+inf"), float("-inf")])
# Iterate over the input source steps
timeSteps = inputSource.TimestepValues
for timeStep in timeSteps :
# Go to the step
source[1].UpdatePipeline(timeStep)
# Get the data info -> data range for this step
dataInfo = source[1].PointData[dataName]
# Calculate the range for a scalar or a vector
if numDimensions == 1 :
# Retrieve the range for the value
tempRange = dataInfo.GetRange()
# Find the min and max over all steps
if tempRange[0] < newRange[1][0] :
newRange[1][0] = tempRange[0]
# endif
if tempRange[1] > newRange[1][1] :
newRange[1][1] = tempRange[1]
# endif
else :
# Retrieve the range for the magnitude
tempRange = dataInfo.GetRange(-1)
# Find the min and max over all steps
if tempRange[0] < newRange[1][0] :
newRange[1][0] = tempRange[0]
# endif
if tempRange[1] > newRange[1][1] :
newRange[1][1] = tempRange[1]
# endif
# endif
# endfor
# endif
else :
# Calculate the range for a scalar or a vector
if numDimensions == 1 :
# Retrieve the range for the value
newRange.append(dataInfo.GetRange() )
else :
# Retrieve the range for the magnitude
newRange.append(dataInfo.GetRange(-1) )
# endif
# endif
# endif
rangesList.append(newRange)
else :
rangesList.append(None)
# endif
else :
print "Skipping unhandled data mapping " + lookupTable.VectorMode + " for " + source[0][0] + "_" + source[0][1] + "_" + dataName
newRange = None
# endif
else :
rangesList.append(None)
# endif
# endif
# endfor
print "Choosen ranges are: " + rangesList.__str__() + "\n"
# 2nd STEP: Save the geometry for each visible source (and for each step, if it is an animation)
# 3rd STEP: store the facets' vertices' values for each source and step
#
# NOTE(review): `index` must advance exactly once per visible source so that it
# stays in lock-step with the entries appended to rangesList in the 1st step.
# Iterate over all sources
for source in sources.items() :
    # If the source is in the view and it's visible
    representation = paraview.servermanager.GetRepresentation(source[1], activeView)
    if representation != None and representation.Visibility == 1 :
        # We only need those sources which have any data associated
        if rangesList[index] != None :
            # Get the data name -> data info
            dataName = representation.ColorArrayName
            dataInfo = source[1].PointData[dataName]
            # If the source has any kind of data associated
            if dataInfo != None :
                # Generate the function to map the data into coordinates (forces interpolation and reduces the size of the data)
                calculator.Function = "iHat * " + rangesList[index][0] + " + jHat * " + rangesList[index][1][0].__str__() + " + kHat * " + rangesList[index][1][1].__str__()
                # Look up the input source
                inputSource = source[1]
                while "Input" in dir(inputSource) :
                    inputSource = inputSource.Input
                # endwhile
                # Is this file an animation or not?
                if "TimestepValues" in dir(inputSource) and inputSource.TimestepValues.__str__() != "None" :
                    # Iterate over the input source steps
                    timeSteps = inputSource.TimestepValues
                    outputMeshFile = args.outputDir + "/" + source[0][0] + "_" + source[0][1] + "_step_"
                    outputDataFile = args.outputDir + "/" + source[0][0] + "_" + source[0][1] + "_" + rangesList[index][0] + "_step_"
                    step = 0
                    for timeStep in timeSteps :
                        # Go to the step
                        source[1].UpdatePipeline(timeStep)
                        print "Extracting mesh: " + outputMeshFile + str(step).zfill(3) + ".stl"
                        # In case the source is not a polygonal mesh
                        surface.Input = source[1]
                        surface.UpdatePipeline(timeStep)
                        triangulator.Input = surface
                        triangulator.UpdatePipeline(timeStep)
                        # Write the binary STL output file for this object and step
                        writer.Input = triangulator
                        writer.FileName = outputMeshFile + str(step).zfill(3) + "_.stl"
                        writer.UpdatePipeline(timeStep)
                        # Some writers add the number of the step but we are doing the animation by hand
                        try :
                            shutil.move(outputMeshFile + str(step).zfill(3) + "_0.stl", outputMeshFile + str(step).zfill(3) + ".stl")
                        except IOError :
                            shutil.move(outputMeshFile + str(step).zfill(3) + "_.stl", outputMeshFile + str(step).zfill(3) + ".stl")
                        # endtry
                        print " -> Extracted mesh: " + outputMeshFile + str(step).zfill(3) + ".stl"
                        print "Extracting data: " + outputDataFile + str(step).zfill(3) + ".bin"
                        # Gets the associated data and replaces the coordinates with its value for each point
                        calculator.Input = triangulator
                        calculator.UpdatePipeline(timeStep)
                        # Write the binary STL output file for this data and step
                        # (STL is reused as a container; the file is renamed to .bin below)
                        writer.Input = calculator
                        writer.FileName = outputDataFile + str(step).zfill(3) + "_.stl"
                        writer.UpdatePipeline(timeStep)
                        # Some writers add the number of the step but we are doing the animation by hand
                        try :
                            shutil.move(outputDataFile + str(step).zfill(3) + "_0.stl", outputDataFile + str(step).zfill(3) + ".bin")
                        except IOError :
                            shutil.move(outputDataFile + str(step).zfill(3) + "_.stl", outputDataFile + str(step).zfill(3) + ".bin")
                        # endtry
                        print " -> Extracted data: " + outputDataFile + str(step).zfill(3) + ".bin"
                        step = step + 1
                    # endfor
                else :
                    outputMeshFile = args.outputDir + "/" + source[0][0] + "_" + source[0][1] + ".stl"
                    outputDataFile = args.outputDir + "/" + source[0][0] + "_" + source[0][1] + "_" + rangesList[index][0]
                    print "Extracting mesh: " + outputMeshFile
                    # In case the source is not a polygonal mesh
                    surface.Input = source[1]
                    surface.UpdatePipeline()
                    triangulator.Input = surface
                    triangulator.UpdatePipeline()
                    # Write the binary STL output file for this object
                    writer.Input = triangulator
                    writer.FileName = outputMeshFile
                    writer.UpdatePipeline()
                    print " -> Extracted mesh: " + outputMeshFile
                    print "Extracting data: " + outputDataFile + ".bin"
                    # Gets the associated data and replaces the coordinates with its value for each point
                    calculator.Input = triangulator
                    calculator.UpdatePipeline()
                    # Write the binary STL output file for this data
                    writer.Input = calculator
                    writer.FileName = outputDataFile + ".stl"
                    writer.UpdatePipeline()
                    # The data is written as STL, then renamed to .bin
                    shutil.move(outputDataFile + ".stl", outputDataFile + ".bin")
                    print " -> Extracted data: " + outputDataFile + ".bin"
                # endif
                print ""
            # endif
        # endif
        index = index + 1
    # endif
# endfor

print "DONE"
|
fercook/SciViz
|
Geometries/STL_from_Paraview/stl_extractor.py
|
Python
|
gpl-2.0
| 17,575
|
[
"ParaView",
"VisIt"
] |
7c025f042ef85d8a1b3af6b30ca179694306eb28cf442552a954d20ccabb16af
|
#!/usr/bin/env python3
#
# Code related to ESET's Linux/Moose research
# For feedback or questions contact us at: github@eset.com
# https://github.com/eset/malware-research/
# Olivier Bilodeau <bilodeau@eset.com>
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2015 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from pprint import pprint
import re
import socket
from struct import unpack
import sys
def decrypt_cnc_msg(ct):
    """
    Decrypt strings

    ct: bytearray of the ciphertext (decrypted in place)
    returns the same bytearray, now holding the plaintext
    """
    # Walk backwards: each plaintext byte is the ciphertext byte XORed with
    # the previously decrypted (i.e. following) byte, seeded with 0xff.
    key = 0xff
    for pos in range(len(ct) - 1, -1, -1):
        key = ct[pos] ^ key
        ct[pos] = key
    return ct
def parse_cnc1_config(f):
    """Parse the configuration blob sent by C&C 1.

    f: binary file-like object positioned at the start of the config
    returns a dict of decoded settings
    """
    def read_u32():
        # native-endian unsigned 32-bit field
        return unpack('I', f.read(4))[0]

    def read_ip():
        return socket.inet_ntoa(f.read(4))

    data = {}
    # The IP that the C&C sees you coming from (used to call it magic_bytes)
    data['cnccfg_ext_ip'] = read_ip()
    data['cnccfg_nb_thdscan_local'] = read_u32()
    data['cnccfg_nb_thdscan_ext'] = read_u32()
    # the 3rd word is a bitfield
    flags = read_u32()
    data['cnccfg_flag_scanner_sniffer'] = bool(flags & 1)
    data['cnccfg_flag_nolocalscan'] = bool(flags & 2)
    data['cnccfg_flag_noextscan'] = bool(flags & 4)
    data['cnccfg_flag_test10073'] = bool(flags & 8)
    data['cnccfg_flag_nattraversal'] = bool(flags & 16)
    # this one is only stored on the second contact with the C&C
    data['cnccfg_flag_recontactcnc'] = bool(flags & 32)
    # in new samples this also controls if we report telnet login to C&C 3
    # hijackdns must be ON to report telnet logins
    data['cnccfg_flag_hijackdns'] = bool(flags & 64)
    # a sniffer is activated only if cnccfg_flag_scanner_sniffer is ON
    # on every non-loopback interfaces
    data['cnccfg_flag_thd_sniffer'] = bool(flags & 128)
    data['cnccfg_flag_killprocess'] = bool(flags & 1024)
    data['cnccfg_flag_share_peers'] = bool(flags & 2048)
    data['cnccfg_proxy_max_clients'] = read_u32()
    # time to sleep between interactions with C&C 2
    data['cnccfg_relaycnc_sleep'] = read_u32()
    data['cnccfg_reportcnc_ip'] = read_ip()
    data['cnccfg_relaycnc_ip'] = read_ip()
    data['cnccfg_relaycnc_timeout'] = read_u32()
    # kept as raw integers, exactly as the original parser stored them
    data['cnccfg_hijackdns1_ip'] = read_u32()
    data['cnccfg_hijackdns2_ip'] = read_u32()
    return data
def parse_cnc1_cracklist(f, results=False):
    """Parse the user/password wordlist segment sent by C&C 1.

    f: binary file-like object
    results: when True, also decrypt and return the wordlist itself
    """
    data = {'userpass_list_len': unpack('I', f.read(4))[0]}
    # user/pass list — always consumed so the stream stays aligned
    encrypted = bytearray(f.read(data['userpass_list_len']))
    if results:
        data['wordlist'] = decrypt_cnc_msg(encrypted)
    return data
def parse_cnc1_whitelist_seg(f):
    """deals with server whitelist allowed to connect to 10073"""
    data = {}
    # size then ip / flag pairs until size * 8
    data['whitelist_len'] = unpack('I', f.read(4))[0]
    entries = []
    for _ in range(data['whitelist_len']):
        ip = socket.inet_ntoa(f.read(4))
        flags = unpack('I', f.read(4))[0]
        entries.append({'ip': ip, 'can_email': bool(flags & 1)})
    data['whitelist'] = entries
    return data
def parse_cnc1_sniffer_config(f):
    """Parse the optional sniffer needle list.

    The segment is only present (consumed) when cnccfg_flag_thd_sniffer is set;
    an empty read means there is nothing to parse.
    """
    data = {}
    header = f.read(4)
    if not header:
        return data
    data['snfcfg_nb_items'] = unpack('I', header)[0]
    for idx in range(data['snfcfg_nb_items']):
        item_len = unpack('I', f.read(4))[0]
        needle = decrypt_cnc_msg(bytearray(f.read(item_len)))
        data['snfcfg_{:02d}_needle'.format(idx)] = needle.decode('ascii')
    return data
def parse_cnc_request(f):
    """Parse one bot -> C&C 1 request.

    f: binary file-like object
    returns a dict describing the request
    """
    data = {}
    # bot version: we've seen 0x1C, 0x1D, 0x1F
    data['version'] = unpack('I', f.read(4))[0]
    data['msg_type'] = unpack('I', f.read(4))[0]
    msg_type = data['msg_type']
    if msg_type == 0x01:
        data = parse_cnc_request_config(f, data)
        f.read(8)
    elif msg_type == 0x0E:
        data['msg_type_decoded'] = 'REPORT_TELNET_LOGIN'
        data['ipaddr'] = socket.inet_ntoa(f.read(4))
        f.read(28)
    elif msg_type == 0x0F:
        data = parse_cnc_request_infect(f, data)
        f.read(20)
    elif msg_type == 0x14:
        data['msg_type_decoded'] = 'REPORT_SNIFF'
        data['pkt_len'] = unpack('I', f.read(4))[0]
        f.read(28)
    # REPORT_SNIFF additional payload
    if msg_type == 0x14:
        data['sniff_payload'] = decrypt_cnc_msg(bytearray(f.read(data['pkt_len'])))
    # REPORT_GOT_SHELL additional payload
    if msg_type == 0x0F:
        data['cpu_model_len'] = unpack('I', f.read(4))[0]
        data['cpu_model'] = decrypt_cnc_msg(bytearray(f.read(data['cpu_model_len'])))
        data['processor_len'] = unpack('I', f.read(4))[0]
        data['processor'] = decrypt_cnc_msg(bytearray(f.read(data['processor_len'])))
    return data
def parse_cnc_request_config(f, data):
    """Decode a REQUEST_CONFIG message body into `data` and return it."""
    data['msg_type_decoded'] = 'REQUEST_CONFIG'
    # five consecutive u32 counters; nb_ifscans is the number of scans done in
    # per-interface scan threads
    for field in ('loop_count', 'nb_localscans', 'nb_extscans', 'nb_ifscans', 'nb_killed'):
        data[field] = unpack('I', f.read(4))[0]
    # offset 0x1C is a bitfield
    status = unpack('I', f.read(4))[0]
    data['flag_BRUTEFORCE_LIST'] = bool(status & 1)
    data['flag_WRITE_ACCESS'] = bool(status & 2)
    data['flag_TIME_PROBLEM'] = bool(status & 128)
    return data
def parse_cnc_request_infect(f, data):
    """Decode a REPORT_GOT_SHELL message body into `data` and return it."""
    data['msg_type_decoded'] = 'REPORT_GOT_SHELL'
    data['ipaddr'] = socket.inet_ntoa(f.read(4))
    data['lst_userpass_offset'] = unpack('I', f.read(4))[0]
    # offset 0x10 is a bitfield; keep the raw value as well as decoded flags
    state = unpack('I', f.read(4))[0]
    data['infect_state'] = state
    data['infect_state_NO_CHMOD'] = bool(state & 1)
    data['infect_state_NO_ECHO'] = bool(state & 2)
    data['infect_state_FOUND_NEAR_SCAN'] = bool(state & 4)
    data['infect_state_PS_BLKLST_HIT'] = bool(state & 0x80)
    return data
def parse_cnc3_response(f):
    """Parse a C&C 3 reply: a sequence of length-prefixed encrypted commands
    terminated by a zero length word."""
    data = {}
    idx = 0
    while True:
        chunk_len = unpack('I', f.read(4))[0]
        if chunk_len == 0:
            break
        decrypted = decrypt_cnc_msg(bytearray(f.read(chunk_len)))
        data['cmd_{!s}'.format(idx)] = decrypted.decode('ascii')
        idx += 1
    return data
def parse_rnde_query(s):
    """
    Data from our sinkhole will be full of 127.x.y.z IPs. This is because
    our host configuration makes the malware reply to it. You can ignore these
    hits which are local only
    """
    match = re.search(r'^\/xx\/rnde\.php\?p=(-?\d+)&f=(\d+)&m=(\d)$', s)
    if match is None:
        return {'error': "PARSING ERROR!"}
    data = {}
    # decrypt IP: XOR with a fixed key, then reinterpret as a packed IPv4
    raw = (int(match.group(1)) ^ 0x7890ABCD).to_bytes(4, byteorder='little', signed=True)
    data['ip_reported'] = socket.inet_ntoa(raw)
    # byte order (endianness) of the reporting bot
    data['reporter_byteorder'] = 'big' if int(match.group(2)) == 0 else 'little'
    # thd_scanner config param: was scan for external or close?
    data['flag_ext_scan'] = bool(int(match.group(3)))
    return data
|
eset/malware-research
|
moose/lib/elan2.py
|
Python
|
bsd-2-clause
| 8,851
|
[
"MOOSE"
] |
7802691b41a6801b29f56bef8a2e6a8a816d10cf0bf5d75bce27e82145a2d62a
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""Lots of under-the-rug, operational garbage in here. Run. Run away."""
import os
import sys
import json
import copy
import platform
from tabulate import tabulate
# yes, this library is imported but never used, but don't remove it
# unless you want to explode `bottle`:
import pkg_resources
anvio_version = '7.1-dev'
anvio_codename = 'hope' # after Hope E. Hopps, https://sivb.org/awards/student-awards/hope-e-hopps-award.html
# see the release notes for details: https://github.com/merenlab/anvio/releases/tag/v7

# development builds point the help system at the 'main' docs tree
anvio_version_for_help_docs = "main" if anvio_version.endswith('dev') else anvio_version

# Global run-time flags, read straight from sys.argv (before any argparse runs)
# so every anvi'o module can consult them at import time:
DEBUG = '--debug' in sys.argv
FORCE = '--force' in sys.argv
QUIET = '--quiet' in sys.argv
NO_PROGRESS = '--no-progress' in sys.argv
AS_MARKDOWN = '--as-markdown' in sys.argv
FIX_SAD_TABLES = '--fix-sad-tables' in sys.argv
DISPLAY_DB_CALLS = '--display-db-calls' in sys.argv
FORCE_USE_MY_TREE = '--force-use-my-tree' in sys.argv
DEBUG_AUTO_FILL_ANVIO_DBS = '--debug-auto-fill-anvio-dbs' in sys.argv
USER_KNOWS_IT_IS_NOT_A_GOOD_IDEA = '--I-know-this-is-not-a-good-idea' in sys.argv

DOCS_PATH = os.path.join(os.path.dirname(__file__), 'docs')
TMP_DIR = None

# if the user wants to use a non-default tmp directory, we set it here
if '--tmp-dir' in sys.argv:
    try:
        idx = sys.argv.index('--tmp-dir')
        TMP_DIR = os.path.abspath(sys.argv[idx+1])

        # create the directory if it is missing (requires a writable parent) …
        if not os.path.exists(TMP_DIR):
            parent_dir = os.path.dirname(TMP_DIR)
            if os.access(parent_dir, os.W_OK):
                os.makedirs(TMP_DIR)
            else:
                raise OSError(f"You do not have permission to generate a directory in '{parent_dir}'")
        # … and make sure what we ended up with is a writable directory
        if not os.path.isdir(TMP_DIR):
            raise OSError(f"The path provided to --tmp-dir, {TMP_DIR}, is not a directory...")
        if not os.access(TMP_DIR, os.W_OK):
            raise OSError(f"You do not have permission to generate files in '{TMP_DIR}'")

        # tempfile honors TMPDIR, so exporting it redirects downstream temp files
        os.environ['TMPDIR'] = TMP_DIR
    except Exception as e:
        # any failure here aborts the whole program on purpose
        print("OSError: ", e)
        sys.exit()
def P(d, dont_exit=False):
    """Poor man's debug output printer during debugging."""
    # Dump whatever we got as pretty JSON, then (by default) bail out so the
    # program stops right at the point being inspected.
    print(json.dumps(d, indent=2))
    if dont_exit:
        return
    sys.exit()
def TABULATE(table, header, numalign="right", max_width=0):
    """Encoding-safe `tabulate`"""
    # fancy_grid needs a UTF-8 capable stdout; fall back to plain grid otherwise
    fmt = "fancy_grid" if sys.stdout.encoding == "UTF-8" else "grid"
    rendered = tabulate(table, headers=header, tablefmt=fmt, numalign=numalign)

    if max_width:
        # let's don't print everything if things need to be cut.
        marker = " // "
        rows = rendered.split('\n')
        if len(rows[0]) + len(marker) + 2 > max_width:
            rendered = '\n'.join([row[:max_width - len(marker)] + marker + row[-2:] for row in rows])

    print(rendered)
# Make sure the Python environment hasn't changed since the installation (happens more often than you'd think
# on systems working with multiple Python installations that are managed through modules):
try:
    if sys.version_info.major != 3 or sys.version_info.minor < 5:
        sys.stderr.write("Sad face :( Your active Python version is %s, but anvi'o only works with Python version 3.5.0 or later.\n" % (platform.python_version()))
        sys.exit(-1)
except Exception:
    # NOTE(review): version_info attributes should always exist; this guard only
    # keeps the import usable on exotic interpreters where the check itself fails.
    sys.stderr.write("(anvi'o failed to learn about your Python version, but it will pretend as if nothing happened)\n\n")

import anvio.constants as constants
# a comprehensive arguments dictionary that provides easy access from various programs that interface anvi'o modules:
D = {
'profile-db': (
['-p', '--profile-db'],
{'metavar': "PROFILE_DB",
'required': True,
'help': "Anvi'o profile database"}
),
'genes-db': (
['--genes-db'],
{'metavar': "GENES_DB",
'required': True,
'help': "Anvi'o genes database"}
),
'pan-db': (
['-p', '--pan-db'],
{'metavar': "PAN_DB",
'required': True,
'help': "Anvi'o pan database"}
),
'pan-or-profile-db': (
['-p', '--pan-or-profile-db'],
{'metavar': "PAN_OR_PROFILE_DB",
'required': True,
'help': "Anvi'o pan or profile database (and even genes database in appropriate contexts)."}
),
'genomes-storage': (
['-g', '--genomes-storage'],
{'metavar': "GENOMES_STORAGE",
'required': False,
'help': "Anvi'o genomes storage file"}
),
'structure-db': (
['-s', '--structure-db'],
{'metavar': "STRUCTURE_DB",
'required': True,
'help': "Anvi'o structure database."}
),
'only-if-structure': (
['--only-if-structure'],
{'default': False,
'action': 'store_true',
'help': "If provided, your genes of interest will be further subset to only include "
"genes with structures in your structure database, and therefore must be supplied in "
"conjunction with a structure database, i.e. `-s <your_structure_database>`. If you did "
"not specify genes of interest, ALL genes will be subset to those that have "
"structures."}
),
'genomes-names': (
['-G', '--genome-names'],
{'metavar': "GENOME_NAMES",
'required': False,
'help': "Genome names to 'focus'. You can use this parameter to limit the genomes included in your analysis. "
"You can provide these names as a comma-separated list of names, or you can put them in a file, "
"where you have a single genome name in each line, and provide the file path."}
),
'blank-profile': (
['--blank-profile'],
{'default': False,
'action': 'store_true',
'help': "If you only have contig sequences, but no mapping data (i.e., you found a genome and would like to "
"take a look from it), this flag will become very handy. After creating a contigs database for your "
"contigs, you can create a blank anvi'o profile database to use anvi'o interactive "
"interface with that contigs database without any mapping data."}
),
'contigs-db': (
['-c', '--contigs-db'],
{'metavar': 'CONTIGS_DB',
'required': True,
'help': "Anvi'o contigs database generated by 'anvi-gen-contigs-database'"}
),
'runinfo': (
['-r', '--runinfo'],
{'metavar': 'RUNINFO_PATH',
'required': True,
'help': "Anvi'o runinfo file path."}
),
'description': (
['--description'],
{'metavar': 'TEXT_FILE',
'required': False,
'help': "A plain text file that contains some description about the project. You can use Markdown syntax. "
"The description text will be rendered and shown in all relevant interfaces, including the "
"anvi'o interactive interface, or anvi'o summary outputs."}
),
'additional-view': (
['-V', '--additional-view'],
{'metavar': 'ADDITIONAL_VIEW',
'help': "A TAB-delimited file for an additional view to be used in the interface. This "
"file should contain all split names, and values for each of them in all "
"samples. Each column in this file must correspond to a sample name. Content "
"of this file will be called 'user_view', which will be available as a new item "
"in the 'views' combo box in the interface"}
),
'dna-sequence': (
['--dna-sequence'],
{'metavar': 'DNA SEQ',
'help': "Literally a DNA sequence. For the very lazy."}
),
'fasta-file': (
['-f', '--fasta-file'],
{'metavar': 'FASTA file',
'help': "A FASTA-formatted input file."}
),
'fasta-text-file': (
['-f', '--fasta-text-file'],
{'metavar': 'FASTA_TEXT_FILE',
'dest': 'fasta_text_file',
'help': "A two-column TAB-delimited file that lists multiple FASTA files to import "
"for analysis. If using for `anvi-dereplicate-genomes` or `anvi-compute-distance`, "
"each FASTA is assumed to be a genome. The first item in the header line "
"should read 'name', and the second item should read 'path'. Each line "
"in the field should describe a single entry, where the first column is "
"the name of the FASTA file or corresponding sequence, and the second column "
"is the path to the FASTA file itself."}
),
'layers-information-file': (
['-D', '--layers-information-file'],
{'metavar': 'FILE',
'help': "A TAB-delimited file with information about layers in your dataset. Each row in this "
"file must correspond to a sample name. Each column must contain a unique attribute. "
"Please refer to the documentation to learn more about the structure and purpose of "
"this file."}
),
'layers-order-file': (
['-R', '--layers-order-file'],
{'metavar': 'FILE',
'help': "A TAB-delimited file with three columns: 'attribute', 'basic', 'newick'. For each attribute, "
"the order of samples must be defined either in the 'basic' form or via a 'newick'-formatted "
"tree structure that describes the organization of each sample. Anvi'o will look for a "
"comma-separated list of sample names for the 'basic' form. Please refer to the online docs "
"for more info. Also you shouldn't hesitate to try to find the right file format until you get "
"it working. There are stringent checks on this file, and you will not break anything while trying!."}
),
'split-length': (
['-L', '--split-length'],
{'metavar': 'INT',
'default': 20000,
'type': int,
'help': "Anvi'o splits very long contigs into smaller pieces, without actually splitting them for real. These "
"'virtual' splits improve the efficacy of the visualization step, and changing the split size gives "
"freedom to the user to adjust the resolution of their display when necessary. The default value is "
"(%(default)d). If you are planning to use your contigs database for metagenomic binning, we advise you "
"to not go below 10,000 (since the lower the split size is, the more items to show in the display, and "
"decreasing the split size does not really help much to binning). But if you are thinking about using this "
"parameter for ad hoc investigations other than binning, you should ignore our advice, and set the split "
"size as low as you want. If you do not want your contigs to be split, you can set the split size to '0' "
"or any other negative integer (lots of unnecessary freedom here, enjoy!)."}
),
'kmer-size': (
['-K', '--kmer-size'],
{'metavar': 'INT',
'default': 4,
'type': int,
'help': "K-mer size for k-mer frequency calculations. The default k-mer size for composition-based "
"analyses is 4, historically. Although tetra-nucleotide frequencies seem to offer the "
"the sweet spot of sensitivity, information density, and manageable number of dimensions "
"for clustering approaches, you are welcome to experiment (but maybe you should leave "
"it as is for your first set of analyses)."}
),
'prodigal-translation-table': (
['--prodigal-translation-table'],
{'metavar': 'INT',
'default': None,
'help': "This is a parameter to pass to the Prodigal for a specific translation table. This parameter "
"corresponds to the parameter `-g` in Prodigal, the default value of which is 11 (so if you do "
"not set anything, it will be set to 11 in Prodigal runtime. Please refer to the Prodigal "
"documentation to determine what is the right translation table for you if you think you need "
"it.)"}
),
'skip-gene-calling': (
['--skip-gene-calling'],
{'default': False,
'action': 'store_true',
'help': "By default, generating an anvi'o contigs database includes the identification of open reading "
"frames in contigs by running a bacterial gene caller. Declaring this flag will by-pass that "
"process. If you prefer, you can later import your own gene calling results into the database."}
),
'remove-partial-hits': (
['--remove-partial-hits'],
{'default': False,
'action': 'store_true',
'help': "By default anvi'o will return hits even if they are partial. Declaring this flag will make "
"anvi'o filter all hits that are partial. Partial hits are hits in which you asked for n1 "
"genes before and n2 genes after the gene that matched the search criteria but the search "
"hits the end of the contig before finding the number of genes that you asked."}
),
'never-reverse-complement': (
['--never-reverse-complement'],
{'default': False,
'action': 'store_true',
'help': "By default, if a gene that is found by the search criteria is reverse in it's direction, "
"then the sequence of the entire locus is reversed before it is saved to the output. "
"If you wish to prevent this behavior then use the flag --never-reverse-complement.",}
),
'zeros-are-outliers': (
['--zeros-are-outliers'],
{'default': False,
'action': 'store_true',
'help': "If you want all zero coverage positions to be treated like outliers "
"then use this flag. The reason to treat zero coverage as outliers "
"is because when mapping reads to a reference we could get many zero "
"positions due to accessory genes. These positions then skew the average "
"values that we compute."}
),
'outliers-threshold': (
['--outliers-threshold'],
{'default': 1.5,
'type': float,
'metavar': 'NUM',
'help': "Threshold to use for the outlier detection. The default value is '%(default).1f'. "
"Absolute deviation around the median is used. To read more about the method please "
"refer to: 'How to Detect and Handle Outliers' by Boris Iglewicz and David Hoaglin "
"(doi:10.1016/j.jesp.2013.03.013)."}
),
'external-gene-calls': (
['--external-gene-calls'],
{'metavar': 'GENE-CALLS',
'help': "A TAB-delimited file to define external gene calls. The file must have these columns: 'gene_callers_id' "
"(a unique integer number for each gene call, start from 1), 'contig' (the contig name the gene call is found), "
"'start' (start position, integer), 'stop' (stop position, integer), 'direction' (the direction of the gene open reading "
"frame; can be 'f' or 'r'), 'partial' (whether it is a complete gene call, or a partial one; must be 1 for partial "
"calls, and 0 for complete calls), 'call_type' (1 if it is coding, 2 if it is noncoding, or 3 if it is unknown (only gene "
"calls with call_type = 1 will have amino acid sequences translated)), 'source' (the gene caller), "
"and 'version' (the version of the gene caller, i.e., v2.6.7 or v1.0). An additional 'optional' column is 'aa_sequence'"
" to explicitly define the amino acid seqeuence of a gene call so anvi'o does not attempt to translate the "
"DNA sequence itself. An EXAMPLE FILE (with the optional 'aa_sequence' column (so feel free to take it out "
"for your own case)) can be found at the URL https://bit.ly/2qEEHuQ. If you are providing external gene calls, "
"please also see the flag `--skip-predict-frame`."}
),
'external-structures': (
['--external-structures'],
{'metavar': 'FILE_PATH',
'help': "A two-column TAB-delimited flat text file that lists PDB protein structures. The first item "
"in the header line should read 'gene_callers_id', and the second should read 'path'. Each line in the "
"file should describe a single entry, where the first column is the gene_callers_id that the structure corresponds "
"to, and the second column is the path to the structure file."}
),
'external-genomes': (
['-e', '--external-genomes'],
{'metavar': 'FILE_PATH',
'help': "A two-column TAB-delimited flat text file that lists anvi'o contigs databases. The first item "
"in the header line should read 'name', and the second should read 'contigs_db_path'. Each line in the "
"file should describe a single entry, where the first column is the name of the genome (or MAG), and "
"the second column is the anvi'o contigs database generated for this genome."}
),
'internal-genomes': (
['-i', '--internal-genomes'],
{'metavar': 'FILE_PATH',
'help': "A five-column TAB-delimited flat text file. The header line must contain these columns: 'name', 'bin_id', "
"'collection_id', 'profile_db_path', 'contigs_db_path'. Each line should list a single entry, where 'name' "
"can be any name to describe the anvi'o bin identified as 'bin_id' that is stored in a collection."}
),
'skip-checking-genome-hashes': (
['--skip-checking-genome-hashes'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you would like anvi'o to skip checking genome hashes. This is only relevant if you may have "
"genomes in your internal or external genomes files that have identical sequences with different names AND if "
"you are OK with it. You may be OK with it, for instance, if you are using `anvi-dereplicate-genomes` program "
"to dereplicate genomes desribed in multiple collections in an anvi'o profile database that may be describing "
"the same genome multiple times (see https://github.com/merenlab/anvio/issues/1397 for a case)."}
),
'metagenomes': (
['-M', '--metagenomes'],
{'metavar': 'FILE_PATH',
'help': "A two-column TAB-delimited flat text file. The header line must contain these columns: 'name', "
"'contigs_db_path', and 'profile_db_path'. Each line should list a single entry, where 'name' "
"can be any name to describe the metagenome stored in the anvi'o contigs database. In this "
"context, the anvi'o profiles associated with contigs database must be SINGLE PROFILES, as in "
"generated by the program `anvi-profile` and not `anvi-merge`."}
),
'bams-and-profiles': (
['-P', '--bams-and-profiles'],
{'metavar': 'FILE_PATH',
'help': "A four-column TAB-delimited flat text file. The header line must contain these columns: 'name', "
"'contigs_db_path', 'profile_db_path', and 'bam_file_path'. See the profiles-and-bams.txt artifact "
"for the details of the file."}
),
'gene-caller': (
['--gene-caller'],
{'metavar': 'GENE-CALLER',
'default': constants.default_gene_caller,
'help': f"The gene caller to utilize. Anvi'o supports multiple gene callers, and some operations (including this one) "
f"requires an explicit mentioning of which one to use. The default {constants.default_gene_caller} is but it "
f"will not be enough if you were experiencing your rebelhood as you should, and have generated your contigs "
f"database with `--external-gene-callers` or something. Also, some HMM collections may add new gene calls "
f"into a given contigs database as an ad-hoc fashion, so if you want to see all the options available to you "
f"in a given contigs database, please run the program `anvi-db-info` and take a look at the output."}
),
'list-gene-callers': (
['--list-gene-callers'],
{'default': False,
'action': 'store_true',
'help': "List available gene callers in the contigs database and quit."}
),
'ignore-internal-stop-codons': (
['--ignore-internal-stop-codons'],
{'default': False,
'action': 'store_true',
'help': "This is only relevant when you have an external gene calls file. If anvi'o figures out that your custom gene calls "
"result in amino acid sequences with stop codons in the middle, it will complain about it. You can use this flag "
"to tell anvi'o to don't check for internal stop codons, Even though this shouldn't happen in theory, we understand "
"that it almost always does. In these cases, anvi'o understands that sometimes we don't want to care, and will "
"not judge you. Instead, it will replace every stop codon residue in the amino acid sequence with an 'X' character. "
"Please let us know if you used this and things failed, so we can tell you that you shouldn't have really used it "
"if you didn't like failures at the first place (smiley)."}
),
'skip-predict-frame': (
['--skip-predict-frame'],
{'default': False,
'action': 'store_true',
'help': "When you have provide an external gene calls file, anvi'o will predict the correct frame for gene calls as best as it can by "
"using a previously-generated Markov model that is trained using the uniprot50 database (see this for details: "
"https://github.com/merenlab/anvio/pull/1428), UNLESS there is an `aa_sequence` entry for a given gene call in the external "
"gene calls file. Please note that PREDICTING FRAMES MAY CHANGE START/STOP POSITIONS OF YOUR GENE CALLS SLIGHTLY, if "
"those that are in the external gene calls file are not describing proper gene calls according to the model. "
"If you use this flag, anvi'o will not rely on any model and will attempt to translate your DNA sequences by solely "
"relying upon start/stop positions in the file, but it will complain about sequences start/stop positions of which are "
"not divisible by 3."}
),
'get-samples-stats-only': (
['--get-samples-stats-only'],
{'default': False,
'action': 'store_true',
'help': "If you only wish to get statistics regarding the occurrence of bins in samples, then use this flag. "
"Especially when dealing with many samples or large genomes, gene stats could be a long time to compute. "
"By using this flag you could save a lot of computation time."}
),
'gen-figures': (
['--gen-figures'],
{'default': False,
'action': 'store_true',
'help': "For those of you who wish to dig deeper, a collection of figures could be created to allow "
"you to get insight into how the classification was generated. This is especially useful to "
"identify cases in which you shouldn't trust the classification (for example due to a large "
"number of outliers). NOTICE: if you ask anvi'o to generate these figures then it will "
"significantly extend the execution time. To learn about which figures are created and what "
"they mean, contact your nearest anvi'o developer, because currently it is a well-hidden secret."}
),
'skip-SNV-profiling': (
['--skip-SNV-profiling'],
{'default': False,
'action': 'store_true',
'help': "By default, anvi'o characterizes single-nucleotide variation in each sample. The use of this flag "
"will instruct profiler to skip that step. Please remember that parameters and flags must be "
"identical between different profiles using the same contigs database for them to merge properly."}
),
'skip-INDEL-profiling': (
['--skip-INDEL-profiling'],
{'default': False,
'action': 'store_true',
'help': "The alignment of a read to a reference genome/sequence can be imperfect, such that the read exhibits "
"insertions or deletions relative to the reference. Anvi'o normally stores this information in the "
"profile database since the time taken and extra storage do not amount to much, but if you insist on not "
"having this information, you can skip storing this information by providing this flag. Note: If "
"--skip-SNV-profiling is provided, --skip-INDEL-profiling will automatically be enforced."}
),
'return-AA-frequencies-instead': (
['--return-AA-frequencies-instead'],
{'default': False,
'action': 'store_true',
'help': "By default, anvi'o will return codon frequencies (as the name suggests), but you can ask for amino "
"acid frequencies instead, simply because you always need more data and more stuff. You're lucky "
"this time, but is there an end to this? Will you ever be satisfied with what you have? "
"Anvi'o needs answers."}
),
'profile-SCVs': (
['--profile-SCVs'],
{'default': False,
'action': 'store_true',
'help': "Anvi'o can perform accurate characterization of codon frequencies in genes during profiling. While having "
"codon frequencies opens doors to powerful evolutionary insights in downstream analyses, due to its "
"computational complexity, this feature comes 'off' by default. Using this flag you can rise against the "
"authority, as you always should, and make anvi'o profile codons."}
),
'drop-previous-annotations': (
['--drop-previous-annotations'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you want anvi'o to remove ALL previous functional annotations for your genes, "
"and then import the new data. The default behavior will add any annotation source into the db "
"incrementally unless there are already annotations from this source. In which case, it will first "
"remove previous annotations for that source only (i.e., if source X is both in the db and in the "
"incoming annotations data, it will replace the content of source X in the db)."}
),
'skip-mindful-splitting': (
['--skip-mindful-splitting'],
{'default': False,
'action': 'store_true',
'help': "By default, anvi'o attempts to prevent soft-splitting large contigs by cutting proper gene calls "
"to make sure a single gene is not broken into multiple splits. This requires a careful "
"examination of where genes start and end, and to find best locations to split contigs with respect "
"to this information. So, when the user asks for a split size of, say, 1,000, it serves as a "
"mere suggestion. When this flag is used, anvi'o does what the user wants and creates splits at "
"desired lengths (although some functionality may become unavailable for the projects that rely on "
"a contigs database that is initiated this way)."}
),
'db-variant': (
['--db-variant'],
{'metavar': 'VARIANT',
'required': False,
'default': 'unknown',
'help': "A free-form text variable to associate a database with a variant for power users and/or programmers. "
"Please leave this blank unless you are certain that you need to set a db variant since it may influence "
"downstream processes. In an ideal world a variant would be a single-word, without any capitalized letters "
"or special characters."}
),
'contigs-fasta': (
['-f', '--contigs-fasta'],
{'metavar': 'FASTA',
'required': True,
'help': "The FASTA file that contains reference sequences you mapped your samples against. This "
"could be a reference genome, or contigs from your assembler. Contig names in this file "
"must match to those in other input files. If there is a problem anvi'o will gracefully "
"complain about it."}
),
'view-data': (
['-d', '--view-data'],
{'metavar': 'VIEW_DATA',
'help': "A TAB-delimited file for view data"}
),
'tree': (
['-t', '--tree'],
{'metavar': 'NEWICK',
'help': "NEWICK formatted tree structure"}
),
'items-order': (
['--items-order'],
{'metavar': 'FLAT_FILE',
'help': "A flat file that contains the order of items you wish the display using the interactive interface. You "
"may want to use this if you have a specific order of items in your mind, and do not want to display a "
"tree in the middle (or simply you don't have one). The file format is simple: each line should have an "
"item name, and there should be no header."}
),
'additional-layers': (
['-A', '--additional-layers'],
{'metavar': 'ADDITIONAL_LAYERS',
'help': "A TAB-delimited file for additional layers for splits. The first column of this file "
"must be split names, and the remaining columns should be unique attributes. "
"The file does not need to contain all split names, or values for each split in "
"every column. Anvi'o will try to deal with missing data nicely. Each column in this "
"file will be visualized as a new layer in the tree."}
),
'target-data-group': (
['-D', '--target-data-group'],
{'metavar': 'NAME',
'default': None,
'help': "Data group to focus. Anvi'o misc data tables support associating a set of data keys "
"with a data group. If you have no idea what this is, then probably you don't need it, "
"and anvi'o will take care of you. Note: this flag is IRRELEVANT if you are working with "
"additional order data tables."}
),
'target-data-table': (
['-t', '--target-data-table'],
{'metavar': 'NAME',
'help': "The target table is the table you are interested in accessing. Currently it can be 'items','layers', or "
"'layer_orders'. Please see most up-to-date online documentation for more information."}
),
'view': (
['--view'],
{'metavar': 'NAME',
'help': "Start the interface with a pre-selected view. To see a list of available views, "
"use --show-views flag."}
),
'category-variable': (
['--category-variable'],
{'default': None,
'metavar': 'CATEGORY',
'help': "The additional layers data variable name that divides layers into multiple categories."}
),
'include-ungrouped': (
['--include-ungrouped'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you want anvi'o to include genomes/samples with no group in the analysis. (For pangenomes, this means "
"the genome has no value set for the category variable which you specified using --category-variable. "
"For modules, this means the sample has no group specified in the groups-txt file. And for regular 'ol "
"genomes, this means the genome has nothing in the 'group' column of the input file). By default all "
"variables with no value will be ignored, but if you apply this flag, they will instead be considered as "
"a single group (called 'UNGROUPED' when performing the statistical analysis."}
),
'include-samples-missing-from-groups-txt': (
['--include-samples-missing-from-groups-txt'],
{'default': False,
'action': 'store_true',
'help': "Sometimes, you might have some sample names in your modules-txt file that you did not include in the groups-txt file. "
"This is fine. By default, we will ignore those samples because they do not have a group. But if you use this flag, then "
"instead those samples will be included in a group called 'UNGROUPED'. Be cautious when using this flag in combination with "
"the --include-ungrouped flag (which also sticks samples without groups into the 'UNGROUPED' group) so that you don't accidentally "
"group together samples that are not supposed to be friends."}
),
'functional-occurrence-table-output': (
['-F', '--functional-occurrence-table-output'],
{'metavar': 'FILE',
'default': None,
'type': str,
'help': "Saves the occurrence frequency information for functions in genomes in a TAB-delimited format. "
"A file name must be provided. To learn more about how the functional occurrence is computed, please "
"refer to the tutorial."}
),
'table': (
['--table'],
{'metavar': 'TABLE_NAME',
'help': "Table name to export."}
),
'fields': (
['-f', '--fields'],
{'metavar': 'FIELD(S)',
'help': "Fields to report. Use --list-tables parameter with a table name to see available "
"fields You can list fields using this notation: --fields 'field_1, field_2, ... field_N'."}
),
'list': (
['-l', '--list'],
{'default': False,
'action': 'store_true',
'help': "Gives a list of tables in a database and quits. If a table is already declared "
"this time it lists all the fields in a given table, in case you would to export "
"only a specific list of fields from the table using --fields parameter."}
),
'title': (
['--title'],
{'metavar': 'NAME',
'help': "Title for the interface. If you are working with a RUNINFO dict, the title "
"will be determined based on information stored in that file. Regardless, "
"you can override that value using this parameter."}
),
'split-hmm-layers': (
['--split-hmm-layers'],
{'default': False,
'action': 'store_true',
'help': "When declared, this flag tells the interface to split every gene found in HMM "
"searches that were performed against non-singlecopy gene HMM profiles into "
"their own layer. Please see the documentation for details."}
),
'annotation-source-for-per-split-summary': (
['-F', '--annotation-source-for-per-split-summary'],
{'default': None,
'type': str,
'metavar': 'FUNCTION ANNOTATION SOURCE',
'help': "Using this parameter with a functional annotation source that (1) is in the contigs database "
"and (2) has a maximum of 10 different function names, will dynamically add a new layer to the "
"intearctive interface where proportions of functions in that source will be shown per split "
"as stacked bar charts."}
),
'show-all-layers': (
['--show-all-layers'],
{'default': False,
'action': 'store_true',
'help': "When declared, this flag tells the interface to show every additional layer even if "
"there are no hits. By default, anvi'o doesn't show layers if there are no hits for "
"any of your items."}
),
'taxonomic-level': (
['--taxonomic-level'],
{'default': 't_genus',
'type': str,
'choices': constants.levels_of_taxonomy,
'help': "The taxonomic level to use whenever relevant and/or available. The default taxonomic level "
"is %(default)s, but if you choose something specific, anvi'o will focus on that whenever "
"possible."}
),
'taxonomy-file': (
['-t', '--taxonomy-file'],
{'default': None,
'type': str,
'help': "Path to The taxonomy file format tsv containe: "
"ID\td__domaine;p__phylum;[..];s__genus species"}
),
'metagenome-mode': (
['-m', '--metagenome-mode'],
{'default': False,
'action': 'store_true',
'help': "Treat a given contigs database as a metagenome rather than treating it as a single genome."}
),
'scg-name-for-metagenome-mode': (
['-S','--scg-name-for-metagenome-mode'],
{'default': None,
'type': str,
'metavar': 'SCG_NAME',
'help': "When running in metagenome mode, anvi'o automatically chooses the most frequent single-copy "
"core gene to estimate the taxonomic composition within a contigs database. If you have a "
"different preference you can use this parameter to communicate that."}
),
'report-scg-sequences-file-prefix': (
['--report-scg-sequences-file-prefix'],
{'default': None,
'type': str,
'metavar': 'FILE NAME PREFIX',
'help': "When running in metagenome mode, anvi'o has access to each SCG sequence. By providing a "
"file prefix, you can instruct anvi'o to report amino acid and DNA sequences for each SCG "
"it uses to estimate taxonomy. The deflines of the resulting FASTA files will match tot he "
"unique entry IDs used to populate the output file that reports taxonomy. Please note that "
"this parameter will only run if you set the parameter `--scg-name-for-metagenome-mode`."}
),
'anticodon-for-metagenome-mode': (
['-S','--anticodon-for-metagenome-mode'],
{'default': None,
'type': str,
'metavar': 'ANTICODON',
'help': "When running in metagenome mode, anvi'o automatically chooses the most frequent anticodon "
"to estimate the taxonomic composition within a contigs database. If you have a "
"different preference you can use this parameter to communicate that."}
),
'per-anticodon-output-file': (
['--per-anticodon-output-file'],
{'default': None,
'type': str,
'metavar': 'FILE_PATH',
'help': "A more detailed output file that will describe taxonomy of each anticodon in a single bin. "
"When consensus taxonomy is generated per bin or genome, taxonomy for each underlying item "
"is not reported. This additional optional output file will elucidate things."}
),
'per-scg-output-file': (
['--per-scg-output-file'],
{'default': None,
'type': str,
'metavar': 'FILE_PATH',
'help': "A more detailed output file that will describe taxonomy of each scg in a single bin. "
"When consensus taxonomy is generated per bin or genome, taxonomy for each underlying item "
"is not reported. This additional optional output file will elucidate things."}
),
    # Optional output files and coverage-computation flags used by the taxonomy
    # estimation programs (SCG- and tRNA-based).
    'all-hits-output-file': (
            ['--all-hits-output-file'],
            {'default': None,
             'type': str,
             'metavar': 'FILE_PATH',
             'help': "If this flag is declared, anvi'o will store a comprehensive list of hits that led to the "
                     "determination of the consensus hit per sequence (which is the only piece of information that "
                     "is stored in the contigs database)."}
                ),
    'report-scg-frequencies': (
            ['--report-scg-frequencies'],
            {'default': None,
             'type': str,
             'metavar': 'FILE_PATH',
             'help': "Report SCG frequencies in a TAB-delimited file and quit. This is a great way to decide which "
                     "SCG name to use in metagenome mode (we often wish to use the most frequent SCG to increase the "
                     "detection of taxa)."}
                ),
    'report-anticodon-frequencies': (
            ['--report-anticodon-frequencies'],
            {'default': None,
             'type': str,
             'metavar': 'FILE_PATH',
             'help': "Report anticodon frequencies in a TAB-delimited file and quit. This is a great way to decide which "
                     "anticodon to use in metagenome mode (we often wish to use the most frequent anticodon to increase the "
                     "detection of taxa)."}
                ),
    'simplify-taxonomy-information': (
            ['--simplify-taxonomy-information'],
            {'default': False,
             'action': 'store_true',
             'help': "The taxonomy output may include a large number of names that contain clade-specific code for "
                     "not-yet-characterized taxa. With this flag you can simplify taxon names. This will influence "
                     "all output files and displays as the use of this flag will on-the-fly trim taxonomic levels "
                     "with clade-specific code names."}
                ),
    'compute-scg-coverages': (
            ['--compute-scg-coverages'],
            {'default': False,
             'action': 'store_true',
             'help': "When this flag is declared, anvi'o will go back to the profile database to learn coverage "
                     "statistics of single-copy core genes for which we have taxonomy information."}
                ),
    'compute-anticodon-coverages': (
            ['--compute-anticodon-coverages'],
            {'default': False,
             'action': 'store_true',
             'help': "When this flag is declared, anvi'o will go back to the profile database to learn coverage "
                     "statistics of tRNA genes used for taxonomy."}
                ),
'update-profile-db-with-taxonomy': (
['--update-profile-db-with-taxonomy'],
{'default': False,
'action': 'store_true',
'help': "When anvi'o knows all both taxonomic affiliations and coverages across samples for single-copy "
"core genes, it can, in theory add this information to the profile database. With this flag you "
"can instruct anvi'o to do that and find information on taxonomy in the `layers` tab of your "
"interactive interface."}
),
    # Locations of taxonomy reference data on disk.
    'taxonomy-database': (
            ['-r', '--taxonomy-database'],
            {'default': None,
             'type': str,
             'metavar': 'PATH',
             'help': "Path to the directory that contains the BLAST databases for single-copy core "
                     "genes. You will almost never need to use this parameter unless you are "
                     "trying something very fancy. But when you do, you can tell anvi'o where "
                     "to look for database files through this parameter."}
                ),
    'scgs-taxonomy-data-dir': (
            ['--scgs-taxonomy-data-dir'],
            {'default': None,
             'type': str,
             'metavar': 'PATH',
             'help': "The directory for SCGs data to be stored (or read from, depending on the context). "
                     "If you leave it as is without specifying anything, anvi'o will set up everything in "
                     "(or try to read things from) a pre-defined default directory. The advantage of using "
                     "the default directory at the time of set up is that every user of anvi'o on a computer "
                     "system will be using a single data directory, but then you may need to run the setup "
                     "program with superuser privileges. If you don't have superuser privileges, then you can "
                     "use this parameter to tell anvi'o the location you wish to use to setup your databases. "
                     "If you are using a program (such as `anvi-run-scg-taxonomy` or `anvi-estimate-scg-taxonomy`) "
                     "you will have to use this parameter to tell those programs where your data are."}
                ),
    'trna-taxonomy-data-dir': (
            ['--trna-taxonomy-data-dir'],
            {'default': None,
             'type': str,
             'metavar': 'PATH',
             'help': "The directory for tRNA taxonomy data to be stored (or read from, depending on the context). "
                     "If you leave it as is without specifying anything, anvi'o will set up everything in "
                     "(or try to read things from) a pre-defined default directory. The advantage of using "
                     "the default directory at the time of set up is that every user of anvi'o on a computer "
                     "system will be using a single data directory, but then you may need to run the setup "
                     "program with superuser privileges. If you don't have superuser privileges, then you can "
                     "use this parameter to tell anvi'o the location you wish to use to setup your databases. "
                     "If you are using a program (such as `anvi-run-trna-taxonomy` or `anvi-estimate-trna-taxonomy`) "
                     "you will have to use this parameter to tell those programs where your data are."}
                ),
'gtdb-release': (
['--gtdb-release'],
{'default': None,
'type': str,
'metavar': 'RELEASE_NUM',
'help': "If you are particularly intersted an earlier release anvi'o knows about, you can set it here "
"Otherwise anvi'o will always use the latest release it knows about."}
),
'reset': (
['--reset'],
{'default': False,
'action': 'store_true',
'help': "Remove all the previously stored files and start over. If something is feels wrong "
"for some reason and if you believe re-downloading files and setting them up could "
"address the issue, this is the flag that will tell anvi'o to act like a real computer "
"scientist challenged with a computational problem."}
),
    # Setup/data-directory options for various annotation databases
    # (COGs, Pfam, PDB, InteracDome).
    'redo-databases': (
            ['--redo-databases'],
            {'default': False,
             'action': 'store_true',
             'help': "Remove existing databases and re-create them. This can be necessary when versions of "
                     "programs change and databases they create and use become incompatible."}
                ),
    'cog-data-dir': (
            ['--cog-data-dir'],
            {'default': None,
             'type': str,
             'help': "The directory path for your COG setup. Anvi'o will try to use the default path "
                     "if you do not specify anything."}
                ),
    'cog-version': (
            ['--cog-version'],
            {'default': None,
             'type': str,
             'help': "COG version. The default is the latest version, which is COG20, meaning that anvi'o will "
                     "use the NCBI's 2020 release of COGs to setup the database and run it on contigs databases. "
                     "There is also an older version of COGs from 2014. If you would like anvi'o to work with that "
                     "one, please use COG14 as a parameter. On a single computer you can have both, and on a single "
                     "contigs database you can run both. Cool and confusing. The anvi'o way."}
                ),
    'pfam-data-dir': (
            ['--pfam-data-dir'],
            {'default': None,
             'type': str,
             'help': "The directory path for your Pfam setup. Anvi'o will try to use the default path "
                     "if you do not specify anything."}
                ),
    'pdb-database-path': (
            ['--pdb-database-path'],
            {'default': None,
             'type': str,
             'metavar': 'PATH',
             'help': "The path for the PDB database to be stored. "
                     "If you leave it as is without specifying anything, anvi'o will set up everything in "
                     "a pre-defined default directory. The advantage of using "
                     "the default directory at the time of set up is that every user of anvi'o on a computer "
                     "system will be using a single data directory, but then you may need to run the setup "
                     "program with superuser privileges. If you don't have superuser privileges, then you can "
                     "use this parameter to tell anvi'o the location you wish to use to setup your database."}
                ),
    'interacdome-data-dir': (
            ['--interacdome-data-dir'],
            {'default': None,
             'type': str,
             'metavar': 'PATH',
             'help': "The path for the interacdome data to be stored. "
                     "If you leave it as is without specifying anything, anvi'o will set up everything in "
                     "a pre-defined default directory. The advantage of using "
                     "the default directory at the time of set up is that every user of anvi'o on a computer "
                     "system will be using a single data directory, but then you may need to run the setup "
                     "program with superuser privileges. If you don't have superuser privileges, then you can "
                     "use this parameter to tell anvi'o the location you wish to use to setup your data."}
                ),
'interacdome-dataset': (
['--interacdome-dataset'],
{'default': 'representable',
'type': str,
'choices': ['representable', 'confident'],
'help': "Choose 'representable' to include Pfams that correspond to domain-ligand interactions that had "
"nonredundant instances across three or more distinct PDB structures. InteracDome"
"authors recommend using this collection to learn more about domain binding properties. Choose "
"'confident' to include Pfams that correspond to domain-ligand interactions "
"that had nonredundant instances across three or more distinct PDB entries and "
"achieved a cross-validated precision of at least 0.5. We recommend using this "
"collection to annotate potential ligand-binding positions in protein "
"sequences. The default is '%(default)s'."}
),
    # KEGG and user-defined metabolism data options.
    'kegg-data-dir': (
            ['--kegg-data-dir'],
            {'default': None,
             'metavar': 'DIR_PATH',
             'type': str,
             'help': "The directory path for your KEGG setup, which will include things like "
                     "KOfam profiles and KEGG MODULE data. Anvi'o will try to use the default path "
                     "if you do not specify anything."}
                ),
    'user-modules': (
            ['-u', '--user-modules'],
            {'default': None,
             'metavar': 'DIR_PATH',
             'type': str,
             'help': "Directory location where your metabolic module files are kept. It is also "
                     "the output directory, since the modules database will be set up in this folder."}
                ),
    'only-user-modules': (
            ['--only-user-modules'],
            {'default': False,
             'action': 'store_true',
             'help': "If you use this flag in conjunction with --user-modules, anvi'o will ONLY "
                     "run estimation on your user-defined metabolism data (ie, it will NOT use KEGG at all). "
                     "The default is to run on both KEGG and user data when --user-modules is provided."}
                ),
    'kegg-archive': (
            ['--kegg-archive'],
            {'default': None,
             'type': str,
             'help': "The path to an archived (.tar.gz) KEGG directory (which you have downloaded from figshare or from "
                     "a collaborator who has a KEGG data directory generated by anvi'o). If you provide this parameter, "
                     "anvi'o will set up the KEGG data directory from the archive you specify rather than downloading "
                     "and setting up our default KEGG archive."}
                ),
    'download-from-kegg': (
            ['-D', '--download-from-kegg'],
            {'default': False,
             'action': 'store_true',
             'help': "This flag is for those people who always need the latest data. You know who you are :) "
                     "By default, this program will set up a snapshot of the KEGG databases, which will be "
                     "dated to the time of the anvi'o release that you are currently working with. The pros of "
                     "this are that the KEGG data will be the same for everyone (which makes sharing your KEGG-annotated "
                     "datasets easy), and you will not have to worry about updating your datasets with new annotations "
                     "every time that KEGG updates. However, KEGG updates regularly, so the con of this is that "
                     "you will not have the most up-to-date version of KEGG for your annotations, metabolism "
                     "estimations, or any other downstream uses of this data. If that is going to be a problem for you, "
                     "do not fear - you can provide this flag to tell anvi'o to download the latest, freshest data directly "
                     "from KEGG's REST API and set it up into an anvi'o-compatible database."}
                ),
    'kegg-snapshot': (
            ['--kegg-snapshot'],
            {'default': None,
             'type': str,
             'metavar': 'RELEASE_NUM',
             'help': "If you are particularly interested in an earlier snapshot of KEGG that anvi'o knows about, you can set it here. "
                     "Otherwise anvi'o will always use the latest snapshot it knows about, which is likely to be the one associated with "
                     "the current release of anvi'o."}
                ),
    # HMM and functional annotation source selection options.
    'hide-outlier-SNVs': (
            ['--hide-outlier-SNVs'],
            {'default': False,
             'action': 'store_true',
             'help': "During profiling, anvi'o marks positions of single-nucleotide variations (SNVs) "
                     "that originate from places in contigs where coverage values are a bit 'sketchy'. "
                     "If you would like to avoid SNVs in those positions of splits in applicable projects "
                     "you can use this flag, and the interface would hide SNVs that are marked as 'outlier' "
                     "(although it is clearly the best to see everything, no one will judge you if you end "
                     "up using this flag) (plus, there may or may not be some historical data on this here: "
                     "https://github.com/meren/anvio/issues/309)."}
                ),
    'hmmer-program': (
            ['--hmmer-program'],
            {'type': str,
             'required': False,
             'help': "Which of the HMMER programs to use to run HMMs (hmmscan or hmmsearch). By default "
                     "anvi'o will use hmmscan for typical HMM operations like those in anvi-run-hmms (as these "
                     "tend to scan a very large number of genes against a relatively small number of HMMs), "
                     "but if you are using this program to scan a very large number of HMMs, hmmsearch might "
                     "be a better choice for performance. For this reason, hmmsearch is the default in operations like "
                     "anvi-run-pfams and anvi-run-kegg-kofams. See this article for a discussion on the performance "
                     "of these two programs: https://cryptogenomicon.org/2011/05/27/hmmscan-vs-hmmsearch-speed-the-numerology/"}
                ),
    'hmm-source': (
            ['--hmm-source'],
            {'metavar': 'SOURCE NAME',
             'default': None,
             'help': "Use a specific HMM source. You can use '--list-hmm-sources' flag to see "
                     "a list of available resources. The default is '%(default)s'."}
                ),
    'hmm-sources': (
            ['--hmm-sources'],
            {'metavar': 'SOURCE NAME',
             'help': "Get sequences for a specific list of HMM sources. You can list one or more "
                     "sources by separating them from each other with a comma character (i.e., "
                     "'--hmm-sources source_1,source_2,source_3'). If you would like to see a list "
                     "of available sources in the contigs database, run this program with "
                     "'--list-hmm-sources' flag."}
                ),
    'list-hmm-sources': (
            ['-l', '--list-hmm-sources'],
            {'default': False,
             'action': 'store_true',
             'help': "List available HMM sources in the contigs database and quit."}
                ),
    'annotation-source': (
            ['--annotation-source'],
            {'metavar': 'SOURCE NAME',
             'default': None,
             'help': "Get functional annotations for a specific annotation source. You can use the flag "
                     "'--list-annotation-sources' to learn about what sources are available."}
                ),
    'annotation-sources': (
            ['--annotation-sources'],
            {'metavar': 'SOURCE NAME[S]',
             'default': None,
             'help': "Get functional annotations for a specific list of annotation sources. You "
                     "can specify one or more sources by separating them from each other with a comma "
                     "character (i.e., '--annotation-sources source_1,source_2,source_3'). The default "
                     "behavior is to return everything"}
                ),
    'list-annotation-sources': (
            ['-l', '--list-annotation-sources'],
            {'default': False,
             'action': 'store_true',
             'help': "List available functional annotation sources."}
                ),
    # Function aggregation behavior and gene/sequence selection flags.
    'aggregate-based-on-accession': (
            ['--aggregate-based-on-accession'],
            {'default': False,
             'action': 'store_true',
             'help': "This is important. When anvi'o aggregates functions for functional enrichment analyses "
                     "or to display them, it uses by default the 'function text' as keys. This is because "
                     "multiple accession IDs in various databases may correspond to the same function, and "
                     "when you are doing a functional enrichment analysis, you most likely would like to "
                     "avoid over-splitting of functions due to this. But then how can we know if you are "
                     "doing something that requires things to be aggregated based on accession ids for "
                     "functions rather than actual functions? We can't. But we have this flag here so you can "
                     "instruct anvi'o to listen to you and not to us."}
                ),
    'aggregate-using-all-hits': (
            ['--aggregate-using-all-hits'],
            {'default': False,
             'action': 'store_true',
             'help': "This program will aggregate functions based on best hits only, and this flag will change that "
                     "behavior. In some cases a gene may be annotated with multiple functions. This is a decision often "
                     "made at the level of function annotation tool. For instance, when you run `anvi-run-ncbi-cogs`, "
                     "you may end up having two COG annotations for a single gene because the gene hit both of them "
                     "with significance scores that were above the default noise cutoff. While this can be useful when "
                     "one visualizes functions or works with an `anvi-summarize` output where things should be most "
                     "comprehensive, having some genes annotated with multiple functions and others with one function "
                     "may over-split them (since in this scenario a gene with COGXXX and COGXXX;COGYYY would end up in "
                     "different bins). Thus, when working on functional enrichment analyses or displaying functions "
                     "anvi'o will only use the best hit for any gene that has multiple hits by default. But you can turn "
                     "that behavior off explicitly and show anvi'o who is the boss by using this flag."}
                ),
    'include-gc-identity-as-function': (
            ['--include-gc-identity-as-function'],
            {'default': False,
             'action': 'store_true',
             'help': "This is an option that asks anvi'o to treat gene cluster names as functions. By "
                     "doing so, you are in fact creating an opportunity to study functional enrichment "
                     "statistics for each gene cluster independently. For instance, multiple gene "
                     "clusters may have the same COG function. But if you wish to use the same enrichment "
                     "analysis in your pangenome without collapsing multiple gene clusters into a single "
                     "function name, you can use this flag, and ask for 'IDENTITY' as the functional "
                     "annotation source."}
                ),
    'gene-names': (
            ['--gene-names'],
            {'metavar': 'HMM HIT NAME',
             'help': "Get sequences only for a specific gene name. Each name should be separated from "
                     "each other by a comma character. For instance, if you want to get back only RecA "
                     "and Ribosomal_L27, you can type '--gene-names RecA,Ribosomal_L27', and you will "
                     "get any and every hit that matches these names in any source. If you would like "
                     "to see a list of available gene names, you can use '--list-available-gene-names' "
                     "flag."}
                ),
    'get-aa-sequences': (
            ['--get-aa-sequences'],
            {'default': False,
             'action': 'store_true',
             'help': "Store amino acid sequences instead."}
                ),
'return-best-hit': (
['--return-best-hit'],
{'default': False,
'action': 'store_true',
'help': "A bin (or genome) may contain more than one hit for a gene name in a given HMM source. For instance, there may "
"be multiple RecA hits in a genome bin from Campbell et al.. Using this flag, will go through all of "
"the gene names that appear multiple times, and remove all but the one with the lowest e-value. Good "
"for whenever you really need to get only a single copy of single-copy core genes from a genome bin."}
),
    # Filters over bins and genes for HMM hit reporting.
    'unique-genes': (
            ['--unique-genes'],
            {'default': False,
             'action': 'store_true',
             'help': "An HMM source may contain multiple models that can hit the same gene in a given bin or genome. "
                     "Using this flag, you can ask anvi'o to go through all genes, identify those with multiple hits "
                     "and report only the most significant hit for each unique gene."}
                ),
    'max-num-genes-missing-from-bin': (
            ['--max-num-genes-missing-from-bin'],
            {'default': None,
             'metavar': 'INTEGER',
             'help': "This filter removes bins (or genomes) from your analysis. If you have a list of gene names, you can "
                     "use this parameter to omit any bin (or external genome) that is missing more than a number of genes "
                     "you desire. For instance, if you have 100 genome bins, and you are interested in working with 5 "
                     "ribosomal proteins, you can use '--max-num-genes-missing-from-bin 4' to remove the bins that "
                     "are missing more than 4 of those 5 genes. This is especially useful for phylogenomic analyses. "
                     "Parameter 0 will remove any bin that is missing any of the genes."}
                ),
    'min-num-bins-gene-occurs': (
            ['--min-num-bins-gene-occurs'],
            {'default': None,
             'metavar': 'INTEGER',
             'help': "This filter removes genes from your analysis. Let's assume you have 100 bins to get sequences for HMM "
                     "hits. If you want to work only with genes among all the hits that occur in at least X number of bins, "
                     "and discard the rest of them, you can use this flag. If you say '--min-num-bins-gene-occurs 90', each "
                     "gene in the analysis will be required at least to appear in 90 genomes. If a gene occurs in less than "
                     "that number of genomes, it simply will not be reported. This is especially useful for phylogenomic "
                     "analyses, where you may want to only focus on genes that are prevalent across the set of genomes "
                     "you wish to analyze."}
                ),
'max-num-gene-clusters-missing-from-genome': (
['--max-num-gene-clusters-missing-from-genome'],
{'default': 0,
'metavar': 'INTEGER',
'help': "This filter will remove genomes from your report. If you have a list of gene cluster names, you can "
"use this parameter to omit any genome from your report if it is missing more than a number of genes "
"you desire. For instance, if you have 100 genomes in your pan genome, and you are interested in "
"working only with genomes that have all 5 specific gene clusters of your choice, you can use "
"'--max-num-gene-clusters-missing-from-genome 4' to remove remove the bins that "
"are missing more than 4 of those 5 genes. This is especially useful for phylogenomic analyses. "
"Parameter 0 will remove any genome that is missing any of the genes."}
),
    # Pangenome gene-cluster filters: occurrence across genomes, per-genome gene
    # counts, and homogeneity index thresholds.
    'min-num-genomes-gene-cluster-occurs': (
            ['--min-num-genomes-gene-cluster-occurs'],
            {'default': 0,
             'metavar': 'INTEGER',
             'help': "This filter will remove gene clusters from your report. Let's assume you have 100 genomes in your pan "
                     "genome analysis. You can use this parameter if you want to work only with gene clusters that occur in "
                     "at least X number of genomes. If you say '--min-num-genomes-gene-cluster-occurs 90', each "
                     "gene cluster in the analysis will be required at least to appear in 90 genomes. If a gene occurs in "
                     "less than that number of genomes, it simply will not be reported. This is especially useful for "
                     "phylogenomic analyses, where you may want to only focus on gene clusters that are prevalent across "
                     "the set of genomes you wish to analyze."}
                ),
    'max-num-genomes-gene-cluster-occurs': (
            ['--max-num-genomes-gene-cluster-occurs'],
            {'default': sys.maxsize,
             'metavar': 'INTEGER',
             'help': "This filter will remove gene clusters from your report. Let's assume you have 100 genomes in your pan "
                     "genome analysis. You can use this parameter if you want to work only with gene clusters that occur in "
                     "at most X number of genomes. If you say '--max-num-genomes-gene-cluster-occurs 1', you will get gene "
                     "clusters that are singletons. Combining this parameter with --min-num-genomes-gene-cluster-occurs can "
                     "give you a very precise way to filter your gene clusters."}
                ),
    'min-num-genes-from-each-genome': (
            ['--min-num-genes-from-each-genome'],
            {'default': 0,
             'metavar': 'INTEGER',
             'help': "This filter will remove gene clusters from your report. If you say '--min-num-genes-from-each-genome 2', "
                     "this filter will remove every gene cluster, to which every genome in your analysis contributed less than "
                     "2 genes. This can be useful to find out gene clusters with many genes from many genomes (such as conserved "
                     "multi-copy genes within a clade)."}
                ),
    'max-num-genes-from-each-genome': (
            ['--max-num-genes-from-each-genome'],
            {'default': sys.maxsize,
             'metavar': 'INTEGER',
             'help': "This filter will remove gene clusters from your report. If you say '--max-num-genes-from-each-genome 1', "
                     "every gene cluster that has more than one gene from any genome that contributes to it will be removed "
                     "from your analysis. This could be useful to remove gene clusters with paralogs from your report for "
                     "appropriate phylogenomic analyses. For instance, using '--max-num-genes-from-each-genome 1' and "
                     "'min-num-genomes-gene-cluster-occurs X' where X is the total number of your genomes, would give you the "
                     "single-copy gene clusters in your pan genome."}
                ),
    'min-functional-homogeneity-index': (
            ['--min-functional-homogeneity-index'],
            {'default': -1,
             'metavar': 'FLOAT',
             'type': float,
             'help': "This filter will remove gene clusters from your report. If you say '--min-functional-homogeneity-index 0.3', "
                     "every gene cluster with a functional homogeneity index less than 0.3 will be removed from your analysis. This "
                     "can be useful if you only want to look at gene clusters that are highly conserved in resulting function"}
                ),
    'max-functional-homogeneity-index': (
            ['--max-functional-homogeneity-index'],
            {'default': 1,
             'metavar': 'FLOAT',
             'type': float,
             'help': "This filter will remove gene clusters from your report. If you say '--max-functional-homogeneity-index 0.5', "
                     "every gene cluster with a functional homogeneity index greater than 0.5 will be removed from your analysis. This "
                     "can be useful if you only want to look at gene clusters that don't seem to be functionally conserved"}
                ),
    'min-geometric-homogeneity-index': (
            ['--min-geometric-homogeneity-index'],
            {'default': -1,
             'metavar': 'FLOAT',
             'type': float,
             'help': "This filter will remove gene clusters from your report. If you say '--min-geometric-homogeneity-index 0.3', "
                     "every gene cluster with a geometric homogeneity index less than 0.3 will be removed from your analysis. This "
                     "can be useful if you only want to look at gene clusters that are highly conserved in geometric configuration"}
                ),
'max-geometric-homogeneity-index': (
['--max-geometric-homogeneity-index'],
{'default': 1,
'metavar': 'FLOAT',
'type': float,
'help': "This filter will remove gene clusters from your report. If you say '--max-geometric-homogeneity-index 0.5', "
"every gene cluster with a geometric homogeneity index greater than 0.5 will be removed from your analysis. This "
"can be useful if you only want to look at gene clusters that have many not be as conserved as others"}
),
    # Lower bound on the combined homogeneity index for reported gene clusters.
    'min-combined-homogeneity-index': (
            ['--min-combined-homogeneity-index'],
            {'default': -1,
             'metavar': 'FLOAT',
             'type': float,
             'help': "This filter will remove gene clusters from your report. If you say '--min-combined-homogeneity-index 0.3', "
                     "every gene cluster with a combined homogeneity index less than 0.3 will be removed from your analysis. This "
                     "can be useful if you only want to look at gene clusters that are highly conserved overall"}
                ),
'max-combined-homogeneity-index': (
['--max-combined-homogeneity-index'],
{'default': 1,
'metavar': 'FLOAT',
'type': float,
'help': "This filter will remove gene clusters from your report. If you say '--max-combined-homogeneity-index 0.5', "
"every gene cluster with a combined homogeneity index greater than 0.5 will be removed from your analysis. This "
"can be useful if you only want to look at gene clusters that have many not be as conserved overall as others"}
),
    # Output and multiple-sequence-alignment options for sequence and
    # gene-cluster reports.
    'add-into-items-additional-data-table': (
            ['--add-into-items-additional-data-table'],
            {'default': None,
             'metavar': 'NAME',
             'help': "If you use any of the filters, and would like to add the resulting item names into the items additional "
                     "data table of your database, you can use this parameter. You will need to give a name for these results to "
                     "be saved. If the given name is already in the items additional data table, its contents will be replaced "
                     "with the new one. Then you can run anvi-interactive or anvi-display-pan to 'see' the results of your filters."}
                ),
    'concatenate-genes': (
            ['--concatenate-genes'],
            {'default': False,
             'action': 'store_true',
             'help': "Concatenate output genes in the same order to create a multi-gene alignment output that is suitable "
                     "for phylogenomic analyses."}
                ),
    'separator': (
            ['--separator'],
            {'metavar': 'STRING',
             'default': None,
             'type': str,
             'help': "Characters to separate things (the default is whatever is most suitable)."}
                ),
    'align-with': (
            ['--align-with'],
            {'metavar': 'ALIGNER',
             'default': None,
             'type': str,
             'help': "The multiple sequence alignment program to use when multiple sequence alignment is necessary. To see "
                     "all available options, use the flag `--list-aligners`."}
                ),
    'list-aligners': (
            ['--list-aligners'],
            {'default': False,
             'action': 'store_true',
             'help': "Show available software for multiple sequence alignment."}
                ),
    'concatenate-gene-clusters': (
            ['--concatenate-gene-clusters'],
            {'default': False,
             'action': 'store_true',
             'help': "Concatenate output gene clusters in the same order to create a multi-gene alignment output that is suitable "
                     "for phylogenomic analyses."}
                ),
    'partition-file': (
            ['--partition-file'],
            {'metavar': 'FILE_PATH',
             'default': None,
             'type': str,
             'help': "Some commonly used software for phylogenetic analyses (e.g., IQ-TREE, RAxML, etc) allow users to "
                     "specify/test different substitution models for each gene of a concatenated multiple sequence alignments. For "
                     "this, they use a special file format called a 'partition file', which indicates the site for each gene in the "
                     "alignment. You can use this parameter to declare an output path for anvi'o to report a NEXUS format partition "
                     "file in addition to your FASTA output (requested by Massimiliano Molari in #1333)."}
                ),
    'report-DNA-sequences': (
            ['--report-DNA-sequences'],
            {'default': False,
             'action': 'store_true',
             'help': "By default, this program reports amino acid sequences. Use this flag to report DNA sequences instead."}
                ),
'skip-multiple-gene-calls': (
['--skip-multiple-gene-calls'],
{'default': False,
'action': 'store_true',
'help': "When generating concatenated output skip gene clusters contain multiple gene calls."}
),
    # Search, locus-extraction, and interactive-interface options.
    'list-available-gene-names': (
            ['-L', '--list-available-gene-names'],
            {'default': False,
             'action': 'store_true',
             'help': "List available gene names in HMM sources selection and quit."}
                ),
    'search-terms': (
            ['--search-terms'],
            {'metavar': 'SEARCH_TERMS',
             'help': "Search terms. Multiple of them can be declared separated by a delimiter (the default is a comma)."}
                ),
    'sensitive': (
            ['--sensitive'],
            {'default': False,
             'action': 'store_true',
             'help': "DIAMOND sensitivity. With this flag you can instruct DIAMOND to be 'sensitive', rather than 'fast' "
                     "during the search. It is likely the search will take remarkably longer. But, hey, if you are doing "
                     "it for your final analysis, maybe it should take longer and be more accurate. This flag is only "
                     "relevant if you are running DIAMOND."}
                ),
    'gene-caller-ids': (
            ['--gene-caller-ids'],
            {'metavar': 'GENE_CALLER_IDS',
             'type': str,
             'help': "Gene caller ids. Multiple of them can be declared separated by a delimiter (the default is a comma). "
                     "In anvi-gen-variability-profile, if you declare nothing you will get all genes matching your other "
                     "filtering criteria. In other programs, you may get everything, nothing, or an error. It really depends "
                     "on the situation. Fortunately, mistakes are cheap, so it's worth a try."}
                ),
    'flank-mode': (
            ['--flank-mode'],
            {'action': 'store_true',
             'help': "If in --flank-mode, anvi-export-locus will extract a locus based on the coordinates "
                     "of flanking genes. You MUST provide 2 flanking genes in the form of TWO "
                     "--search-term, --gene-caller-ids, or --hmm-sources. The --flank-mode option is "
                     "appropriate for extracting loci of variable gene number lengths, but are consistently "
                     "located between the same flanking genes in the genome(s) of interest."}
                ),
    'num-genes': (
            ['-n','--num-genes'],
            {'metavar': 'NUM_GENES',
             'type': str,
             'help': "Required for DEFAULT mode. For each match (to the function, or HMM that was searched) a sequence which includes "
                     "a block of genes will be saved. The block could include either genes only in the forward direction of the gene (defined "
                     "according to the direction of transcription of the gene) or reverse or both. "
                     "If you wish to get both direction use a comma (no spaces) to define the block "
                     "For example, '-n 4,5' will give you four genes before and five genes after. "
                     "Whereas, '-n 5' will give you five genes after (in addition to the gene that matched). "
                     "To get only genes preceding the match use '-n 5,0'. If the number of genes requested "
                     "exceeds the length of the contig, then the output will include the sequence until the end "
                     "of the contig."}
                ),
    'gene-mode': (
            ['--gene-mode'],
            {'default': False,
             'action': 'store_true',
             'help': "Initiate the interactive interface in 'gene mode'. In this mode, the items are genes (instead of "
                     "splits of contigs). The following views are available: detection (the detection value of each gene "
                     "in each sample). The mean_coverage (the mean coverage of genes). The non_outlier_mean_coverage "
                     "(the mean coverage of the non-outlier nucleotide positions of each gene in each sample (median absolute "
                     "deviation is used to remove outliers per gene per sample)). The non_outlier_coverage_std view (standard deviation "
                     "of the coverage of non-outlier positions of genes in samples). You can also choose to order items "
                     "and layers according to each one of the aforementioned views. In addition, all layer ordering "
                     "that are available in the regular mode (i.e. the full mode where you have contigs/splits) are also "
                     "available in 'gene mode', so that, for example, you can choose to order the layers according to 'detection', and that "
                     "would be the order according to the detection values of splits, whereas if you choose 'genes_detections' "
                     "then the order of layers would be according to the detection values of genes. Inspection and sequence "
                     "functionality are available (through the right-click menu), except now sequences are of the specific gene. "
                     "Inspection has now two options available: 'Inspect Context', which brings you to the inspection page of the split "
                     "to which the gene belongs where the inspected gene will be highlighted in yellow in the bottom, and 'Inspect Gene', "
                     "which opens the inspection page only for the gene and 100 nts around each side of it (the purpose of this option "
                     "is to make the inspection page load faster if you only want to look at the nucleotide coverage of a specific gene). "
                     "NOTICE: You can't store states or collections in 'gene mode'. However, you still can make fake selections, and create "
                     "fake bins for your viewing convenience only (smiley). Search options are available, and you can even search for functions "
                     "if you have them in your contigs database. ANOTHER NOTICE: loading this mode might take a while if your bin "
                     "has many genes, and your profile database has many samples, this is because the gene coverages stats are "
                     "computed in an ad-hoc manner when you load this mode, we know this is not ideal and we plan to improve that "
                     "(along with other things). If you have suggestions/complaints regarding this mode please comment on this "
                     "github issue: https://goo.gl/yHhRei. Please refer to the online tutorial for more information."}
                ),
    'gene-caller-id': (
            ['--gene-caller-id'],
            {'metavar': 'GENE_CALLER_ID',
             'type': int,
             'help': "A single gene id."}
                ),
    'target-version': (
            ['-t', '--target-version'],
            {'metavar': 'VERSION',
             'type': int,
             'help': "Anvi'o will stop upgrading your database when it reaches to this version. "}
                ),
    'delimiter': (
            ['--delimiter'],
            {'metavar': 'CHAR',
             'default': ',',
             'help': "The delimiter to parse multiple input terms. The default is '%(default)s'."}
                ),
    'wrap': (
            ['--wrap'],
            {'metavar': 'WRAP',
             'default': 120,
             'type': int,
             'help': "When to wrap sequences when storing them in a FASTA file. The default is "
                     "'%(default)d'. A value of '0' would be equivalent to 'do not wrap'."}
                ),
'no-wrap': (
['--no-wrap'],
{'default': False,
'action': 'store_true',
'help': "Do not be wrap sequences nicely in the output file."}
),
'leeway': (
['--leeway'],
{'metavar': 'LEEWAY_NTs',
'default': 100,
'type': int,
'help': "The minimum number of nucleotides for a given short read mapping into "
"the gene context for it to be reported. You must consider the length of "
"your short reads, as well as the length of the gene you are targeting. "
"The default is %(default)d nts."}
),
'flank-length': (
['--flank-length'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "Extend sequences for gene calls with additional nucleotides from both ends. If the seqeunce for "
"a target gene is between nucleotide positions START and STOP, using a flank lenght of M will give "
"you a sequence that starts at START - M and ends at STOP + M."}
),
'split-R1-and-R2': (
['-Q', '--split-R1-and-R2'],
{'default': False,
'action': 'store_true',
'help': "When declared, this program outputs 3 FASTA files for paired-end reads: one "
"for R1, one for R2, and one for unpaired reads."}
),
'gzip-output': (
['-X', '--gzip-output'],
{'default': False,
'action': 'store_true',
'help': "When declared, output file(s) will be gzip compressed and the extension `.gz` will be added."}
),
'list-contigs': (
['--list-contigs'],
{'default': False,
'action': 'store_true',
'help': "When declared, the program will list contigs in the BAM file and exit gracefully "
"without any further analysis."}
),
'list-splits': (
['--list-splits'],
{'default': False,
'action': 'store_true',
'help': "When declared, the program will list split names in the profile database and quite"}
),
'list-collections': (
['--list-collections'],
{'default': False,
'action': 'store_true',
'help': "Show available collections and exit."}
),
'list-bins': (
['--list-bins'],
{'default': False,
'action': 'store_true',
'help': "List available bins in a collection and exit."}
),
'list-states': (
['--list-states'],
{'default': False,
'action': 'store_true',
'help': "Show available states and exit."}
),
'show-views': (
['--show-views'],
{'default': False,
'action': 'store_true',
'help': "When declared, the program will show a list of available views, and exit."}
),
'list-completeness-sources': (
['--list-completeness-sources'],
{'default': False,
'action': 'store_true',
'help': "Show available sources and exit."}
),
'completeness-source': (
['--completeness-source'],
{'metavar': 'NAME',
'help': "Single-copy gene source to use to estimate completeness."}
),
'split-name': (
['--split-name'],
{'metavar': 'SPLIT_NAME',
'help': "Split name."}
),
'contig-name': (
['--contig-name'],
{'metavar': 'CONTIG_NAME',
'help': "Contig name."}
),
'program': (
['--program'],
{'metavar': 'PROGRAM_NAME',
'help': "Program name.",
'required': False,
'default': 'default'}
),
'splits-of-interest': (
['--splits-of-interest'],
{'metavar': 'FILE',
'help': "A file with split names. There should be only one column in the file, and each line "
"should correspond to a unique split name."}
),
'contigs-of-interest': (
['--contigs-of-interest'],
{'metavar': 'FILE',
'help': "It is possible to focus on only a set of contigs. If you would like to do that and ignore "
"the rest of the contigs in your contigs database, use this parameter with a flat file "
"every line of which desribes a single contig name."}
),
'samples-of-interest': (
['--samples-of-interest'],
{'metavar': 'FILE',
'help': "A file with samples names. There should be only one column in the file, and each line "
"should correspond to a unique sample name (without a column header)."}
),
'samples-txt': (
['--samples-txt'],
{'metavar': 'FILE',
'help': "A TAB-delimited file with columns ['sample', 'r1', 'r2'] or ['sample', 'group', 'r1', 'r2'] "
"where `r1` and `r2` columns are paths to compressed or flat FASTQ files for each `sample` and "
"`group` is an optional column for relevant applications where samples are affiliated with one-word "
"categorical variables that define to which group they are assigned."}
),
'genes-of-interest': (
['--genes-of-interest'],
{'metavar': 'FILE',
'help': "A file with anvi'o gene caller IDs. There should be only one column in the file, and each line "
"should correspond to a unique gene caller id (without a column header)."}
),
'gene-cluster-id': (
['--gene-cluster-id'],
{'metavar': 'GENE_CLUSTER_ID',
'help': "Gene cluster ID you are interested in."}
),
'gene-cluster-ids-file': (
['--gene-cluster-ids-file'],
{'metavar': 'FILE_PATH',
'help': "Text file for gene clusters (each line should contain be a unique gene cluster id)."}
),
'bin-id': (
['-b', '--bin-id'],
{'metavar': 'BIN_NAME',
'help': "Bin name you are interested in."}
),
'bin-names-list': (
['-b', '--bin-names-list'],
{'metavar': 'BIN NAMES',
'help': "Comma-separated list of bin names."}
),
'new-bin-name': (
['-B', '--new-bin-name'],
{'metavar': 'BIN NAME',
'help': "The new bin name."}
),
'bin-ids-file': (
['-B', '--bin-ids-file'],
{'metavar': 'FILE_PATH',
'help': "Text file for bins (each line should be a unique bin id)."}
),
'find-from-split-name': (
['--find-from-split-name'],
{'metavar': 'SPLIT_NAME',
'help': "If you don't know the bin name you want to work with but if you know the split name it contains "
"you can use this parameter to tell anvi'o the split name, and so it can find the bin for you "
"automatically. This is something extremely difficult for anvi'o to do, but it does it anyway "
"because you."}
),
'collection-name': (
['-C', '--collection-name'],
{'metavar': 'COLLECTION_NAME',
'help': "Collection name."}
),
'num-positions-from-each-split': (
['--num-positions-from-each-split'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "Each split may have one or more variable positions. By default, anvi'o will report every SNV "
"position found in a given split. This parameter will help you to define a cutoff for the maximum "
"number of SNVs to be reported from a split (if the number of SNVs is more than the number you "
"declare using this parameter, the positions will be randomly subsampled)."}
),
'min-scatter': (
['-m', '--min-scatter'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "This one is tricky. If you have N samples in your dataset, a given variable position x in one "
"of your splits can split your N samples into `t` groups based on the identity of the "
"variation they harbor at position x. For instance, `t` would have been 1, if all samples had the same "
"type of variation at position x (which would not be very interesting, because in this case "
"position x would have zero contribution to a deeper understanding of how these samples differ "
"based on variability. When `t` > 1, it would mean that identities at position x across samples "
"do differ. But how much scattering occurs based on position x when t > 1? If t=2, how many "
"samples ended in each group? Obviously, even distribution of samples across groups may tell "
"us something different than uneven distribution of samples across groups. So, this parameter "
"filters out any x if 'the number of samples in the second largest group' (=scatter) is less "
"than -m. Here is an example: let's assume you have 7 samples. While 5 of those have AG, 2 "
"of them have TC at position x. This would mean scatter of x is 2. If you set -m to 2, this "
"position would not be reported in your output matrix. The default value for -m is "
"%(default)d, which means every `x` found in the database and survived previous filtering "
"criteria will be reported. Naturally, -m cannot be more than half of the number of samples. "
"Please refer to the user documentation if this is confusing."}
),
'min-ratio-of-competings-nts': (
['-r', '--min-ratio-of-competings-nts'],
{'metavar': 'RATIO',
'default': 0,
'type': float,
'help': "Minimum ratio of the competing nucleotides at a given position. Default is %(default)d."}
),
'max-num-unique-positions': (
['-n', '--max-num-unique-positions'],
{'metavar': 'NUM_POSITIONS',
'default': 0,
'type': int,
'help': "Maximum number of unique positions to be used in the network. This may be one way to avoid extremely "
"large network descriptions that would defeat the purpose of a quick visualization. If there are more "
"unique positions in the variability profile, the program will randomly select a subset of them to match "
"the `max-num-unique-positions`. The default is %(default)d, which means all positions should be reported. "
"Remember that the number of nodes in the network will also depend on the number of samples described in "
"the variability profile."}
),
'num-threads': (
['-T', '--num-threads'],
{'metavar': 'NUM_THREADS',
'default': 1,
'type': int,
'help': "Maximum number of threads to use for multithreading whenever possible. Very conservatively, the default "
"is 1. It is a good idea to not exceed the number of CPUs / cores on your system. Plus, please "
"be careful with this option if you are running your commands on a SGE --if you are clusterizing your runs, "
"and asking for multiple threads to use, you may deplete your resources very fast."}
),
'num-parallel-processes': (
['-P', '--num-parallel-processes'],
{'metavar': 'NUM_PROCESSES',
'default': 1,
'type': int,
'help': "Maximum number of processes to run in parallel. Please note that this is different than number of threads. If you "
"ask for 4 parallel processes, and 5 threads, anvi'o will run four processes in parallel and assign 5 threads "
"to each. For resource allocation you must multiply the number of processes and threads."}
),
'variability-profile': (
['-V', '--variability-profile'],
{'metavar': 'VARIABILITY_TABLE',
'type': str,
'required': False,
'help': "The output of anvi-gen-variability-profile, or a different variant-calling output that has been converted to the "
"anvi'o format."}
),
'min-coverage-in-each-sample': (
['--min-coverage-in-each-sample'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "Minimum coverage of a given variable nucleotide position in all samples. If a nucleotide position "
"is covered less than this value even in one sample, it will be removed from the analysis. Default "
"is %(default)d."}
),
'min-departure-from-reference': (
['-r', '--min-departure-from-reference'],
{'metavar': 'FLOAT',
'default': 0,
'type': float,
'help': "Takes a value between 0 and 1, where 1 is maximum divergence from the reference. Default is %(default)f. "
"The reference here observation that corresponds to a given position in the mapped context."}
),
'max-departure-from-reference': (
['-z', '--max-departure-from-reference'],
{'metavar': 'FLOAT',
'default': 1,
'type': float,
'help': "Similar to '--min-departure-from-reference', but defines an upper limit for divergence. The "
"default is %(default)f."}
),
'min-departure-from-consensus': (
['-j', '--min-departure-from-consensus'],
{'metavar': 'FLOAT',
'default': 0,
'type': float,
'help': "Takes a value between 0 and 1, where 1 is maximum divergence from the consensus for a given position. The "
"default is %(default)f. The consensus is the most frequent observation at a given position."}
),
'max-departure-from-consensus': (
['-a', '--max-departure-from-consensus'],
{'metavar': 'FLOAT',
'default': 1,
'type': float,
'help': "Similar to '--min-departure-from-consensus', but defines an upper limit for divergence. The "
"default is %(default)f."}
),
'min-occurrence-of-variable-positions': (
['-x', '--min-occurrence'],
{'metavar': 'NUM_SAMPLES',
'default': 1,
'type': int,
'help': "Minimum number of samples a nucleotide position should be reported as variable. Default is %(default)d. "
"If you set it to 2, for instance, each eligible variable position will be expected to appear in at least "
"two samples, which will reduce the impact of stochastic, or unintelligible variable positions."}
),
'quince-mode': (
['--quince-mode'],
{'default': False,
'action': 'store_true',
'help': "The default behavior is to report allele frequencies only at positions where variation was reported "
"during profiling (which by default uses some heuristics to minimize "
"the impact of error-driven variation). So, if there are 10 samples, and a given position has been "
"reported as a variable site during profiling in only one of those samples, there will be no "
"information will be stored in the database for the remaining 9. When this flag is "
"used, we go back to each sample, and report allele frequencies for each sample at this position, "
"even if they do not vary. It will take considerably longer to report when this flag is on, and the use "
"of it will increase the file size dramatically, however it is inevitable for some statistical approaches "
"and visualizations."}
),
'kiefl-mode': (
['--kiefl-mode'],
{'default': False,
'action': 'store_true',
'help': "The default behavior is to report codon/amino-acid frequencies only at positions where variation was reported "
"during profiling (which by default uses some heuristics to minimize the impact of error-driven variation). "
"When this flag is used, all positions are reported, regardless of whether they contained variation in any "
"sample. The reference codon for all such entries is given a codon frequency of 1. All other entries (aka "
"those with legitimate variation to be reported) remain unchanged. This flag can only be used with `--engine AA` "
"or `--engine CDN` and is incompatible wth --quince-mode."}
),
'include-contig-names': (
['--include-contig-names'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you would like contig names for each variable position to be included in the "
"output file as a column. By default, we do not include contig names since they can practically "
"double the output file size without any actual benefit in most cases."}
),
'include-split-names': (
['--include-split-names'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you would like split names for each variable position to be included in the "
"output file as a column."}
),
'include-additional-data': (
['--include-additional-data'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you would like to append data stored in the `amino_acid_additional_data` table as "
"additional columns to your output. NOTE: This is not yet implemented for the `nucleotide_additional_data` "
"table."}
),
'include-site-pnps': (
['--include-site-pnps'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if you want per-site pN and pS added as additional columns. Synonymity "
"will be calculated with respect to the reference, to the consenus, and to the "
"most common consensus seen at that site across samples (popular consensus). The number "
"of synonymous and nonsynonymous sites will also be stored for each case. This makes a total of 12 "
"added columns. This flag will be ignored if --engine is not CDN."}
),
'engine': (
['--engine'],
{'default': 'NT',
'metavar': 'ENGINE',
'type': str,
'help': "Variability engine. The default is '%(default)s'."}
),
'min-binding-frequency': (
['-m', '--min-binding-frequency'],
{'metavar': 'FLOAT',
'default': 0.2,
'type': float,
'help': "InteracDome has associated binding 'frequencies', which can be considered scores between 0 to 1 that "
"quantify how likely a position is to be involved in binding. Use this parameter to filter out low frequencies. "
"The default is %(default)f. Warning, your contigs database size will grow massively if this is set to 0.0, but "
"you're the boss."}
),
'min-hit-fraction': (
['-f', '--min-hit-fraction'],
{'metavar': 'FLOAT',
'default': 0.5,
'type': float,
'help': "Any hits where the hit length--relative to the HMM profile--divided by the total HMM profile length, is less than this value, "
"it will be removed from the results and will not contribute to binding frequencies. The default is %(default)s"}
),
'information-content-cutoff': (
['-t', '--information-content-cutoff'],
{'metavar': 'FLOAT',
'default': 4.0,
'type': float,
'help': "This parameter can be used to control for low-quality domain hits. Each domain is composed of positions (match states) "
"with varying degrees of conservancy, which can be quantified with information content (IC). High IC means highly conserved. "
"For example, IC = 4 corresponds to 95%% of the members of the Pfam sharing the same amino acid at that position. "
"By default, anvi'o demands that for an alignment of a user's gene with a Pfam HMM, the gene sequence must match with the "
"consensus amino acid of each match state that has IC > %(default)f. For context, it is common for a Pfam to not even have a "
"position with an IC > 4, so these represent truly very conserved positions. You can modify this with this parameter. For example, "
"if you think this is dumb, you can set this to 10000, and then no domain hits will be removed for this reason."}
),
'driver': (
['--driver'],
{'metavar': 'DRIVER',
'type': str,
'required': True,
'help': "Automatic binning drivers. Available options '%(choices)s'."}
),
'transpose': (
['--transpose'],
{'default': False,
'action': 'store_true',
'help': "Transpose the input matrix file before clustering."}
),
'skip-check-names': (
['--skip-check-names'],
{'default': False,
'action': 'store_true',
'help': "For debugging purposes. You should never really need it."}
),
'skip-news': (
['--skip-news'],
{'default': False,
'action': 'store_true',
'help': "Don't try to read news content from upstream."}
),
'experimental-org-input-dir': (
['-i', '--input-directory'],
{'metavar': 'DIR_PATH',
'type': str,
'help': "Input directory where the input files addressed from the configuration "
"file can be found (i.e., the profile database, if PROFILE.db::TABLE "
"notation is used in the configuration file)."}
),
'clustering-name': (
['-N', '--name'],
{'metavar': 'NAME',
'type': str,
'help': "The name to use when storing the resulting clustering in the database. "
"This name will appear in the interactive interface and other relevant "
"interfaces. Please consider using a short and descriptive single-word "
"(if you do not do that you will make anvi'o complain)."}
),
'distance': (
['--distance'],
{'metavar': 'DISTANCE_METRIC',
'type': str,
'default': constants.distance_metric_default,
'help': "The distance metric for the hierarchical clustering. The default distance "
"metric is '%(default)s'. You can find the full list of distance metrics "
"either by making a mistake (such as entering a non-existent distance metric "
"and making anvi'o upset), or by taking a look at the help menu of the "
"hierarchy.distance.pdist function in the scipy.cluster module."}
),
'linkage': (
['--linkage'],
{'metavar': 'LINKAGE_METHOD',
'type': str,
'default': constants.linkage_method_default,
'help': "The linkage method for the hierarchical clustering. The default linkage "
"method is '%(default)s', because that is the best one. It really is. We talked "
"to a lot of people and they were all like 'this is the best one available' and "
"it is just all out there. Honestly it is so good that we will build a wall around it "
"and make other linkage methods pay for it. But if you want to see a full "
"list of available ones you can check the hierarcy.linkage function in "
"the scipy.cluster module. Up to you really. But then you can't use %(default)s "
"anymore, and you would have to leave anvi'o right now."}
),
'input-dir': (
['--input-dir'],
{'metavar': 'DIR_PATH',
'type': str,
'help': "Directory path for input files"}
),
'output-dir': (
['-o', '--output-dir'],
{'metavar': 'DIR_PATH',
'type': str,
'help': "Directory path for output files"}
),
'output-file': (
['-o', '--output-file'],
{'metavar': 'FILE_PATH',
'type': str,
'help': "File path to store results."}
),
'log-file': (
['--log-file'],
{'metavar': 'FILE_PATH',
'default': None,
'type': str,
'help': "File path to store debug/output messages."}
),
'trna-hits-file': (
['--trna-hits-file'],
{'metavar': 'FILE_PATH',
'default': None,
'type': str,
'help': "File path to store raw hits from tRNA scan."}
),
'trna-cutoff-score': (
['--trna-cutoff-score'],
{'metavar': 'INT',
'default': 20,
'type': int,
'help': "Minimum score to assume a hit comes from a proper tRNA gene (passed to the tRNAScan-SE). "
"The default is %(default)d. It can get any value between 0-100."}
),
'also-scan-trnas': (
['--also-scan-trnas'],
{'default': False,
'action': 'store_true',
'help': "Also scan tRNAs while you're at it."}
),
'output-db-path': (
['-o', '--output-db-path'],
{'metavar': 'DB_FILE_PATH',
'type': str,
'help': "Output file path for the new database."}
),
'temporary-dir-path': (
['--temporary-dir-path'],
{'metavar': 'PATH',
'type': str,
'help': "If you don't provide anything here, this program will come up with a temporary "
"directory path by itself to store intermediate files, and clean it later. If you "
"want to have full control over this, you can use this flag to define one."}
),
'output-file-prefix': (
['-O', '--output-file-prefix'],
{'metavar': 'FILENAME_PREFIX',
'type': str,
'help': "A prefix to be used while naming the output files (no file type "
"extensions please; just a prefix)."}
),
'long-format': (
['--long-format'],
{'default': False,
'action': 'store_true',
'help': "Report the output file as a long-format TAB-delmited file instead of a TAB-delimited "
"sparse matrix."}
),
'matrix-format': (
['--matrix-format'],
{'default': False,
'action': 'store_true',
'help': "Report the output as TAB-delmited sparse matrix files."}
),
'raw-output': (
['--raw-output'],
{'default': False,
'action': 'store_true',
'help': "Just store the raw output without any processing of the primary data structure."}
),
'dry-run': (
['--dry-run'],
{'default': False,
'action': 'store_true',
'help': "Don't do anything real. Test everything, and stop right before wherever the developer "
"said 'well, this is enough testing', and decided to print out results."}
),
'skip-dry-run': (
['--skip-dry-run'],
{'default': False,
'action': 'store_true',
'help': "Don't do a dry run. Just start the workflow! Useful when your job is so big it takes "
"hours to do a dry run."}
),
'no-interactive': (
['--no-interactive'],
{'default': False,
'action': 'store_true',
'help': "Don't show anything interactive (if possible)."}
),
'verbose': (
['--verbose'],
{'default': False,
'action': 'store_true',
'help': "Be verbose, print more messages whenever possible. You may regret this."}
),
'concise': (
['--concise'],
{'default': False,
'action': 'store_true',
'help': "Don't be verbose, print less messages whenever possible."}
),
'report-minimal': (
['--report-minimal'],
{'default': False,
'action': 'store_true',
'help': "Report minimum amount of data for higher performance whenever possible. This flag turn "
"your output files into bare minimums for speed gains."}
),
'just-do-it': (
['--just-do-it'],
{'default': False,
'action': 'store_true',
'help': "Don't bother me with questions or warnings, just do it."}
),
'ip-address': (
['-I', '--ip-address'],
{'metavar': 'IP_ADDR',
'type': str,
'default': '0.0.0.0',
'help': "IP address for the HTTP server. The default ip address (%(default)s) should "
"work just fine for most."}
),
'browser-path': (
['--browser-path'],
{'metavar': 'PATH',
'type': str,
'default': None,
'help': "By default, anvi'o will use your default browser to launch the interactive interface. If you "
"would like to use something else than your system default, you can provide a full path for an "
"alternative browser using this parameter, and hope for the best. For instance we are using "
"this parameter to call Google's experimental browser, Canary, which performs better with "
"demanding visualizations."}
),
'api-url': (
['--api-url'],
{'metavar': 'API_URL',
'type': str,
'default': 'https://anvi-server.org',
'help': "Anvi'server url"}
),
'port-number': (
['-P', '--port-number'],
{'metavar': 'INT',
'default': None,
'type': int,
'help': "Port number to use for anvi'o services. If nothing is declared, anvi'o will try to find "
"a suitable port number, starting from the default port number, %d." % constants.default_port_number}
),
'user': (
['--user'],
{'metavar': 'USERNAME',
'default': None,
'type': str,
'help': "The user for an anvi'server."}
),
'user-server-shutdown': (
['--user-server-shutdown'],
{'default': False,
'action': 'store_true',
'help': "Allow users to shutdown an anvi'server via web interface."}
),
'read-only': (
['--read-only'],
{'default': False,
'action': 'store_true',
'help': "When the interactive interface is started with this flag, all 'database write' "
"operations will be disabled."}
),
'server-only': (
['--server-only'],
{'default': False,
'action': 'store_true',
'help': "The default behavior is to start the local server, and fire up a browser that "
"connects to the server. If you have other plans, and want to start the server "
"without calling the browser, this is the flag you need."}
),
'password-protected': (
['--password-protected'],
{'default': False,
'action': 'store_true',
'help': "If this flag is set, command line tool will ask you to enter a password and interactive "
"interface will be only accessible after entering same password. This option is recommended "
"for shared machines like clusters or shared networks where computers are not isolated."}
),
'store-in-db': (
['--store-in-db'],
{'default': False,
'action': 'store_true',
'help': "Store analysis results into the database directly."}
),
'skip-store-in-db': (
['--skip-store-in-db'],
{'default': False,
'action': 'store_true',
'help': "By default, analysis results are stored in the profile database. The use of "
"this flag will let you skip that"}
),
'min-e-value': (
['-e', '--min-e-value'],
{'metavar': 'E-VALUE',
'default': 1e-15,
'type': float,
'help': "Minimum significance score of an HMM find to be considered as a valid hit. "
"Default is %(default)g."}
),
'max-num-target-sequences': (
['--max-num-target-sequences'],
{'metavar': 'NUMBER',
'default': 20,
'type': float,
'help': "Maximum number of target sequences to request from BLAST or DIAMOND searches. The default is %(default)g%%."}
),
'fetch-filter': (
['--fetch-filter'],
{'metavar': 'FILTER',
'default': None,
'type': str,
'help': f"By default, anvi'o fetches all reads from a BAM file. Once a read is 'fetched', some "
f"reads may be excluded if you have used parameters such as `--min-percent-identity`. "
f"But the `--fetch-filter` is different as it determines WHICH reads from a BAM file "
f"will be used for profiling at all. You can do a lot of fun things with this parameter. "
f"For details, please read the online documentation for `anvi-profile` using the URL "
f"you should see at the end of the `--help` output on your terminal. The known filters are "
f"the following: {', '.join([k for k in constants.fetch_filters.keys() if k])}."}
),
'min-percent-identity': (
['--min-percent-identity'],
{'metavar': 'PERCENT_IDENTITY',
'default': 80.0,
'type': float,
'help': "Minimum percent identity. The default is %(default)g%%."}
),
'min-full-percent-identity': (
['--min-full-percent-identity'],
{'metavar': 'FULL_PERCENT_IDENTITY',
'default': 20.0,
'type': float,
'help': "In some cases you may get high raw ANI estimates (percent identity scores) "
"between two genomes that have little to do with each other simply because only "
"a small fraction of their content may be aligned. This can be partly "
"alleviated by considering the *full* percent identity, which includes in its "
"calculation regions that did not align. For example, if the alignment is a "
"whopping 97 percent identity but only 8 percent of the genome aligned, the *full* "
"percent identity is 0.970 * 0.080 = 0.078 OR 7.8 percent. *full* percent "
"identity is always included in the report, but you can also use it as a filter "
"for other metrics, such as percent identity. This filter will set all ANI "
"measures between two genomes to 0 if the *full* percent identity is less than "
"you deem trustable. When you set a value, anvi'o will go through the ANI "
"results, and set all ANI measures between two genomes to 0 if the *full* "
"percent identity *between either of them* is less than the parameter described "
"here. The default is %(default)g."}
),
'use-full-percent-identity': (
['--use-full-percent-identity'],
{'action': 'store_true',
'help': "Usually, percent identity is calculated only over aligned regions, and this "
"is what is used as a distance metric by default. But with this flag, "
"you can instead use the *full* percent identity as the distance metric. It is the "
"same as percent identity, except that regions that did not align are included "
"in the calculation. This means *full* percent identity will always be less than or "
"equal to percent identity. How is it calculated? Well if P is the percentage identity "
"calculated in aligned regions, L is the length of the genome, and A is the fraction "
"of the genome that aligned to a compared genome, the full percent identity is "
"P * (A/L). In other words, it is the percent identity multiplied by the alignment "
"coverage. For example, if the alignment is a whopping 97 percent identity but "
"only 8 percent of the genome aligned, the *full* percent identity is 0.970 * 0.080 "
"= 0.078, which is just 7.8 percent."}
),
'min-alignment-fraction': (
['--min-alignment-fraction'],
{'default': 0.0,
'metavar': 'NUM',
'type': float,
'help': "In some cases you may get high raw ANI estimates "
"(percent identity scores) between two genomes that have little to do with each other "
"simply because only a small fraction of their content may be aligned. This filter will "
"set all ANI scores between two genomes to 0 if the alignment fraction is less than you "
"deem trustable. When you set a value, anvi'o will go through the ANI results, and set "
"percent identity scores between two genomes to 0 if the alignment fraction *between either "
"of them* is less than the parameter described here. The default is %(default)g."}
),
'significant-alignment-length': (
['--significant-alignment-length'],
{'default': None,
'metavar': 'INT',
'type': int,
'help': "So --min-alignment-fraction "
"discards any hit that is coming from alignments that represent shorter fractions of genomes, "
"but what if you still don't want to miss an alignment that is longer than an X number of "
"nucleotides regardless of what fraction of the genome it represents? Well, this parameter is "
"to recover things that may be lost due to --min-alignment-fraction parameter. Let's say, "
"if you set --min-alignment-fraction to '0.05', and this parameter to '5000', anvi'o will keep "
"hits from alignments that are longer than 5000 nts, EVEN IF THEY REPRESENT less than 5 percent of "
"a given genome pair. Basically if --min-alignment-fraction is your shield to protect yourself "
"from incoming garbage, --significant-alignment-length is your chopstick to pick out those that "
"may be interesting, and you are a true warrior here."}
),
'bins-info': (
['--bins-info'],
{'metavar': 'BINS_INFO',
'help': "Additional information for bins. The file must contain three TAB-delimited columns, "
"where the first one must be a unique bin name, the second should be a 'source', and the "
"last one should be a 7 character HTML color code (i.e., '#424242'). Source column must "
"contain information about the origin of the bin. If these bins are automatically "
"identified by a program like CONCOCT, this column could contain the program name and "
"version. The source information will be associated with the bin in various interfaces "
"so in a sense it is not *that* critical what it says there, but on the other hand it is, "
                     "because we should also think about people who may end up having to work with what we put "
"together later."}
),
'bins': (
['--bins'],
{'metavar': 'BINS_DATA',
'help': "Tab-delimited file, first column contains tree leaves (gene clusters, splits, contigs etc.) "
                     "and the second column contains the bin they belong to."}
),
'contigs-mode': (
['--contigs-mode'],
{'default': False,
'action': 'store_true',
'help': "Use this flag if your binning was done on contigs instead of splits. Please refer "
"to the documentation for help."}
),
'sample-name': (
['-S', '--sample-name'],
{'metavar': 'NAME',
'help': "It is important to set a sample name (using only ASCII letters and digits "
"and without spaces) that is unique (considering all others). If you do not "
"provide one, anvi'o will try to make up one for you based on other information "
"(although, you should never let the software decide these things)."}
),
'project-name': (
['-n', '--project-name'],
{'metavar': 'PROJECT_NAME',
'help': "Name of the project. Please choose a short but descriptive name (so anvi'o can use "
"it whenever she needs to name an output file, or add a new table in a database, or name "
"her first born)."}
),
'skip-hierarchical-clustering': (
['--skip-hierarchical-clustering'],
{'default': False,
'action': 'store_true',
'help': "If you are not planning to use the interactive interface (or if you have other "
"means to add a tree of contigs in the database) you may skip the step where "
                     "hierarchical clustering of your items is performed based on default clustering "
"recipes matching to your database type."}
),
'skip-variability-tables': (
['--skip-variability-tables'],
{'default': False,
'action': 'store_true',
'help': "Processing variability tables in profile database might take a very long time. With "
"this flag you will be asking anvi'o to skip them."}
),
'enforce-hierarchical-clustering': (
['--enforce-hierarchical-clustering'],
{'default': False,
'action': 'store_true',
'help': "If you have more than 25,000 splits in your merged profile, anvi-merge will automatically "
"skip the hierarchical clustering of splits (by setting --skip-hierarchical-clustering flag "
"on). This is due to the fact that computational time required for hierarchical clustering "
"increases exponentially with the number of items being clustered. Based on our experience "
"we decided that 25,000 splits is about the maximum we should try. However, this is not a "
"theoretical limit, and you can overwrite this heuristic by using this flag, which would "
"tell anvi'o to attempt to cluster splits regardless."}
),
'compress-auxiliary-data': (
['--compress-auxiliary-data'],
{'default': False,
'action': 'store_true',
'help': "When declared, the auxiliary data file in the resulting output will be compressed. This "
                     "saves space, but it takes long. Also, if you are planning to compress the entire output "
"later using GZIP, it is even useless to do. But you are the boss!"}
),
'cluster-contigs': (
['--cluster-contigs'],
{'default': False,
'action': 'store_true',
'help': "Single profiles are rarely used for genome binning or visualization, and since "
"clustering step increases the profiling runtime for no good reason, the default "
"behavior is to not cluster contigs for individual runs. However, if you are "
"planning to do binning on one sample, you must use this flag to tell anvi'o to "
"run cluster configurations for single runs on your sample."}
),
'num-clusters-requested': (
['--num-clusters-requested'],
{'metavar': 'INT',
'default': 400,
'type': int,
'help': "How many clusters do you request? Default is %(default)d."}
),
'overwrite-output-destinations': (
['-W', '--overwrite-output-destinations'],
{'default': False,
'action': 'store_true',
'help': "Overwrite if the output files and/or directories exist."}
),
'delete-if-exists': (
['--delete-if-exists'],
{'default': False,
'action': 'store_true',
'help': "Be bold (at your own risk), and delete if exists."}
),
'report-variability-full': (
['--report-variability-full'],
{'default': False,
'action': 'store_true',
'help': "One of the things anvi-profile does is to store information about variable "
"nucleotide positions (SNVs). Usually it does not report every variable position, since "
"not every variable position is genuine variation. Say, if you have 1,000 coverage, "
"and all nucleotides at that position are Ts and only one of them is a C, the "
"confidence of that C being a real variation is quite low. anvi'o has a simple "
"algorithm in place to reduce the impact of noise. However, using this flag "
"you can disable it and ask profiler to report every single variation (which "
"may result in very large output files and millions of reports, but you are the "
"boss). Do not forget to take a look at '--min-coverage-for-variability' parameter. "
"Also note that this flag controls indel reporting: normally '--min-coverage-for-variability' "
"and internal anvi'o heuristics control whether or not indels should be reported, but with this "
"flag all indels are reported."}
),
'report-extended-deflines': (
['--report-extended-deflines'],
{'default': False,
'action': 'store_true',
'help': "When declared, the deflines in the resulting FASTA file will contain more information."}
),
'manual-mode': (
['--manual-mode'],
{'default': False,
'action': 'store_true',
'help': "Using this flag, you can run the interactive interface in an ad hoc manner using "
"input files you curated instead of standard output files generated by an anvi'o "
"run. In the manual mode you will be asked to provide a profile database. In this "
"mode a profile database is only used to store 'state' of the interactive interface "
"so you can reload your visual settings when you re-analyze the same files again. If "
"the profile database you provide does not exist, anvi'o will create an empty one for "
"you."}
),
'hmm-profile-dir': (
['-H', '--hmm-profile-dir'],
{'metavar': 'HMM PROFILE PATH',
             'help': "You can use this parameter to specify a directory path that contains an HMM profile. "
                     "This way you can run HMM profiles that are not included in anvi'o. See the online "
                     "documentation to find out about the specifics of this directory structure."}
),
'installed-hmm-profile': (
['-I', '--installed-hmm-profile'],
{'metavar': 'HMM PROFILE NAME(S)'}
),
'hmmer-output-dir': (
['--hmmer-output-dir'],
{'metavar': 'OUTPUT DIRECTORY PATH',
'help': "If you provide a path with this parameter, then the HMMER output file(s) will be saved "
"in this directory. Please note that this will only work if you are running on only one "
"profile using the -I flag."}
),
'domain-hits-table': (
['--domain-hits-table'],
{'default': False,
'action': 'store_true',
'help': "Use this flag in conjunction with --hmmer-output-dir to request domain table output "
"from HMMER (i.e., the file specified by the --domtblout flag from hmmsearch or hmmscan). Otherwise, only the regular "
"--tblout file will be stored in the specified directory. Please note that even if you use "
"this flag, the HMM hits stored in the database will be taken from the --tblout file only. "
"Also, this option only works with HMM profiles for amino acid sequences (not nucleotides)."}
),
'add-to-functions-table': (
['--add-to-functions-table'],
{'default': False,
'action': 'store_true',
             'help': "Use this flag if you want anvi'o to store your HMM hits as gene annotations in the 'gene_functions' "
"table of the database, rather than in the 'hmm_hits' table."}
),
'min-contig-length': (
['-M', '--min-contig-length'],
{'metavar': 'INT',
'default': 1000,
'type': int,
'help': "Minimum length of contigs in a BAM file to analyze. The minimum length should be long enough "
"for tetra-nucleotide frequency analysis to be meaningful. There is no way to define a golden "
"number of minimum length that would be applicable to genomes found in all environments, but we "
"chose the default to be %(default)d, and have been happy with it. You are welcome to experiment, "
"but we advise to never go below 1,000. You also should remember that the lower you go, the more "
"time it will take to analyze all contigs. You can use --list-contigs parameter to have an idea how "
"many contigs would be discarded for a given M."}
),
'max-contig-length': (
['--max-contig-length'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "Just like the minimum contig length parameter, but to set a maximum. Basically this will remove "
"any contig longer than a certain value. Why would anyone need this? Who knows. But if you ever "
"do, it is here."}
),
'min-mean-coverage': (
['-X', '--min-mean-coverage'],
{'metavar': 'INT',
'default': 0,
'type': int,
'help': "Minimum mean coverage for contigs to be kept in the analysis. The default value is %(default)d, "
"which is for your best interest if you are going to profile multiple BAM files which are then "
"going to be merged for a cross-sectional or time series analysis. Do not change it if you are not "
"sure this is what you want to do."}
),
'min-coverage-for-variability': (
['-V', '--min-coverage-for-variability'],
{'metavar': 'INT',
'default': 10,
'type': int,
'help': "Minimum coverage of a nucleotide position to be subjected to SNV profiling. By default, anvi'o will "
"not attempt to make sense of variation in a given nucleotide position if it is covered less than "
"%(default)dX. You can change that minimum using this parameter. This parameter also controls the minimum "
"coverage for reporting indels. If an indel is observed at a position, yet the coverage of the position "
"in the contig where the indel starts is less than this parameter, the indel will be discarded."}
),
'contigs-and-positions': (
['--contigs-and-positions'],
{'metavar': 'CONTIGS_AND_POS',
'required': True,
'help': "This is the file where you list the contigs, and nucleotide positions you are interested in. This "
"is supposed to be a TAB-delimited file with two columns. In each line, the first column should be "
"the contig name, and the second column should be the comma-separated list of integers for nucleotide "
"positions."}
),
'state-autoload': (
['--state-autoload'],
{'metavar': 'NAME',
'help': "Automatically load previous saved state and draw tree. To see a list of available states, "
"use --show-states flag."}
),
'load-full-state': (
['--load-full-state'],
{'required': False,
'action': 'store_true',
             'help': "Often the minimum and maximum values defined for an entire profile database that contains "
"all contigs do not scale well when you wish to work with a single bin in the refine mode. For "
"this reason, the default behavior of anvi-refine is to ignore min/max values set in the default "
"state. This flag is your way of telling anvi'o to not do that, and load the state stored in the "
"profile database as is. Please note that this variable has no influence on the `detection` view. "
"For the `detection` view, anvi'o will always load the global detection settings as if you have "
"used this flag."}
),
'state': (
['-s', '--state'],
{'metavar': 'STATE',
'help': "State file, you can export states from database using anvi-export-state program"}
),
'collection-autoload': (
['--collection-autoload'],
{'metavar': 'NAME',
'help': "Automatically load a collection and draw tree. To see a list of available collections, "
"use --list-collections flag."}
),
'full-report': (
['--full-report'],
{'metavar': 'FILE_NAME',
'default': None,
'help': "Optional output file with a fuller description of findings."}
),
'include-sequences': (
['--include-sequences'],
{'default': False,
'action': 'store_true',
'help': "Include sequences in the report."}
),
'show-states': (
['--show-states'],
{'default': False,
'action': 'store_true',
'help': "When declared the program will print all available states and exit."}
),
'skip-init-functions': (
['--skip-init-functions'],
{'default': False,
'action': 'store_true',
'help': "When declared, function calls for genes will not be initialized (therefore will be missing from all "
"relevant interfaces or output files). The use of this flag may reduce the memory fingerprint and "
"processing time for large datasets."}
),
'init-gene-coverages': (
['--init-gene-coverages'],
{'default': False,
'action': 'store_true',
'help': "Initialize gene coverage and detection data. This is a very computationally expensive step, but it is "
"necessary when you need gene level coverage data. The reason this is very computationally expensive "
"is because anvi'o computes gene coverages by going back to actual coverage values of each gene to "
"average them, instead of using contig average coverage values, for extreme accuracy."}
),
'reformat-contig-names': (
['--reformat-contig-names'],
{'default': False,
'action': 'store_true',
'help': "Reformat contig names while generating the summary output so they look fancy. With this flag, anvi'o "
"will replace the original names of contigs to those that include the bin name as a prefix in resulting "
"summary output files per bin. Use this flag carefully as it may influence your downstream analyses due "
"to the fact that your original contig names in your input FASTA file for the contigs database will not "
"be in the summary output. Although, anvi'o will report a conversion map per bin so you can recover the "
"original contig name if you have to."}
),
'skip-auto-ordering': (
['--skip-auto-ordering'],
{'default': False,
'action': 'store_true',
'help': "When declared, the attempt to include automatically generated orders of items based on additional data "
"is skipped. In case those buggers cause issues with your data, and you still want to see your stuff and "
"deal with the other issue maybe later."}
),
'quick-summary': (
['--quick-summary'],
{'default': False,
'action': 'store_true',
'help': "When declared the summary output will be generated as quickly as possible, with minimum amount "
"of essential information about bins."}
),
'only-complete-links': (
['--only-complete-links'],
{'default': False,
'action': 'store_true',
'help': "When declared, only reads that cover all positions will be reported. It is necessary to use this "
"flag if you want to perform oligotyping-like analyses on matching reads."}
),
'add-coverage': (
['--add-coverage'],
{'default': False,
'action': 'store_true',
'help': "Use this flag to request that coverage and detection values be added as columns in long-format "
                     "output files. You must provide the profile database corresponding to your contigs db for this to work."}
),
'users-data-dir': (
['-U', '--users-data-dir'],
{'metavar': 'USERS_DATA_DIR',
'type': str,
'help': "Input directory where the user database is read and stored by the server. A new database will be "
"created if no directory is found."}
),
'smtp-config-file': (
['-E', '--smtp-config-file'],
{'metavar': 'SMTP_CONFIG_INI',
'type': str,
'help': "The configuration file for SMTP server to send e-mails. The input file should be formatted as an INI "
"file that starts with the header '[SMTP]', and should describe values of each of these variables in "
"the following lines: 'from_address' (the e-mail address that should appear in the 'From' section of "
"e-mails sent by the server), 'server_address' (the address of the SMTP server to connect), 'server_port' "
"(the port number to connect), 'init_tls' (whether to initialize TLS protocol), 'username' (the username "
"for the server to login, if necessary), 'password' (the password associated with the username for login, "
"if the password is not blank)."}
),
'validate-users-automatically': (
['--validate-users-automatically'],
{'default': True,
'action': 'store_true',
'help': "If this is true, users will not receive a link via email to confirm their account but instead be validated "
"automatically if there is no smtp configuration."}
),
'queue-size': (
['--queue-size'],
{'default': 0,
'metavar': 'INT',
'required': False,
'help': "The queue size for worker threads to store data to communicate to the main thread. The default is set by the "
"class based on the number of threads. If you have *any* hesitation about whether you know what you are doing, "
"you should not change this value."}
),
'ngram-window-range': (
['--ngram-window-range'],
{'default': "2:3",
'metavar': "NGRAM_WINDOW_RANGE",
'type': str,
'required': False,
             'help': "The range of window sizes of Ngrams to analyze for synteny patterns. "
"Please format the window-range as x:y (e.g. Window sizes 2 to 4 would be denoted as: 2:4)"}
),
'write-buffer-size': (
['--write-buffer-size'],
{'default': 500,
'metavar': 'INT',
'required': False,
'help': "How many items should be kept in memory before they are written to the disk. The default is "
"%(default)d. The larger the buffer size, the less frequently the program will access the disk, yet the more memory "
"will be consumed since the processed items will be cleared off the memory only after they are written "
"to the disk. The default buffer size will likely work for most cases, but if "
"you feel you need to reduce it, we trust you. Please keep an eye on the memory "
"usage output to make sure the memory use never exceeds the size of the "
"physical memory."}
),
'write-buffer-size-per-thread': (
['--write-buffer-size-per-thread'],
{'default': 500,
'metavar': 'INT',
'required': False,
             'help': "How many items should be kept in memory before they are written to the disk. The default is "
"%(default)d per thread. So a single-threaded job would have a write buffer size of "
"%(default)d, whereas a job with 4 threads would have a write buffer size of 4*%(default)d. "
                     "The larger the buffer size, the less frequently the program will access the disk, yet the more memory "
"will be consumed since the processed items will be cleared off the memory only after they are written "
"to the disk. The default buffer size will likely work for most cases. Please keep an eye on the memory "
"usage output to make sure the memory use never exceeds the size of the physical memory."}
),
'export-gff3': (
['--export-gff3'],
{
'default': False,
'action': 'store_true',
'help': "If this is true, the output file will be in GFF3 format."
}
),
'min-palindrome-length': (
['-l', '--min-palindrome-length'],
{'default': 10,
'metavar': 'INT',
'type': int,
'help': "The minimum palindrome length."}
),
'min-distance': (
['-d', '--min-distance'],
{'default': 50,
'metavar': 'INT',
'type': int,
'help': "The minimum distance between the palindromic sequences (this parameter is essentially "
"asking for the number of `x` in the sequence `ATCGxxxCGAT`). The default is 50, "
"which means the algorithm will never report by default sequences that are like "
"`ATCGCGAT` with no gaps between the palindrome where the palindromic sequence matches "
"itself (but you can get such palindromes by setting this parameter to 0)."}
),
'max-num-mismatches': (
['-m', '--max-num-mismatches'],
{'default': 0,
'metavar': 'INT',
'type': int,
'help': "The maximum number of mismatches allowed."}
),
'export-svg': (
['--export-svg'],
{'type': str,
'metavar': 'FILE_PATH',
'required': False,
'help': "The SVG output file path."}
),
'tab-delimited': (
['--tab-delimited'],
{'default': False,
'required': False,
'action': 'store_true',
'help': "Use the TAB-delimited format for the output file."}
),
'splits-mode': (
['--splits-mode'],
{'default': False,
'action': 'store_true',
'help': "Specify this flag if you would like to output coverages of individual 'splits', rather than their 'parent' "
"contig coverages."}
),
'report-as-text': (
['--report-as-text'],
{'default': False,
'action': 'store_true',
             'help': "If you give this flag, Anvi'o will not open a new browser to show Contigs database statistics and write all stats "
"to TAB separated file and you should also give --output-file with this flag otherwise Anvi'o will complain."}
),
'dump-dir': (
['--dump-dir'],
{'required': False,
'help': "Modeling and annotating structures requires a lot of moving parts, each which have "
"their own outputs. The output of this program is a structure database containing the "
"pertinent results of this computation, however a lot of stuff doesn't make the cut. "
"By providing a directory for this parameter you will get, in addition to the structure "
"database, a directory containing the raw output for everything."}
),
'include-subdirs': (
['--include-subdirs'],
{'default': False,
'action': 'store_true',
'help': "Also search subdirectories for files."}
),
'workflow': (
['-w', '--workflow'],
{'required': False,
'help': "You must specify a workflow name. To see a list of available workflows "
"run --list-workflows."}
),
'list-workflows': (
['--list-workflows'],
{'required': False,
'action': 'store_true',
'help': "Print a list of available snakemake workflows"}
),
'save-workflow-graph': (
['--save-workflow-graph'],
{'required': False,
'action': 'store_true',
'help': "Save a graph representation of the workflow. If you are using this flag and if your "
"system is unable to generate such graph outputs, you will hear anvi'o complaining "
"(still, totally worth trying)."}
),
'get-default-config': (
['--get-default-config'],
{'metavar': 'OUTPUT_FILENAME',
'type': str,
'help': "Store a json formatted config file with all the default settings of the "
"workflow. This is a good draft you could use in order to write your own "
"config file. This config file contains all parameters that could be configured "
"for this workflow. NOTICE: the config file is provided with default values "
"only for parameters that are set by us in the workflow. The values for the rest "
"of the parameters are determined by the relevant program."}
),
'list-dependencies': (
['--list-dependencies'],
{'required': False,
'action': 'store_true',
'help': "Print a list of the dependencies of this workflow. You must provide a workflow name "
"and a config file. snakemake will figure out which rules need to be run according "
"to your config file, and according to the files available on your disk. According "
"to the rules that need to be run, we will let you know which programs are going to "
"be used, so that you can make sure you have all of them installed and loaded."}
),
'config-file': (
['-c', '--config-file'],
{'required': False,
'help': "A JSON-formatted configuration file."}
),
'additional-params': (
['-A', '--additional-params'],
{'required': False,
'nargs':'...', 'type':str,
'help': "Additional snakemake parameters to add when running snakemake. NOTICE: --additional-params "
"HAS TO BE THE LAST ARGUMENT THAT IS PASSED TO anvi-run-workflow, ANYTHING THAT "
"FOLLOWS WILL BE CONSIDERED AS PART OF THE ADDITIONAL PARAMETERS THAT ARE PASSED TO SNAKEMAKE. "
"Any parameter that is accepted by snakemake should be fair game here, but it is your "
"responsibility to make sure that whatever you added makes sense. To see what parameters are "
"available please refer to the snakemake documentation. For example, you could use this to set "
"up cluster submission using --additional-params --cluster 'YOUR-CLUSTER-SUBMISSION-CMD'."}
),
'self-key': (
['--self-key'],
{'default': None,
'type': str,
'help': "The key you wish to set or change."}
),
'self-value': (
['--self-value'],
{'default': None,
'type': str,
'help': "The value you wish to set for the self key."}
),
'no-variability': (
['--no-variability'],
{'required': False,
'action': 'store_true',
'help': "If provided, no measures of sequence heterogeneity (from short read data) will be overlaid "
"on structures."}
),
'compute-gene-coverage-stats': (
['--compute-gene-coverage-stats'],
{'required': False,
'action': 'store_true',
'help': "If provided, gene coverage statistics will be appended for each entry in variability report. "
"This is very useful information, but will not be included by default because it is an expensive "
"operation, and may take some additional time."}
),
'repository': (
['--repository'],
{'default': 'merenlab/anvio',
'type': str,
'help': "Source repository to download releases, currently only Github is supported. Enter in 'merenlab/anvio' format."}
),
'inseq-stats': (
['--inseq-stats'],
{'required': False,
'action': 'store_true',
'default': False,
'help': "Provide if working with INSeq/Tn-Seq genomic data. With this, all gene level "
"coverage stats will be calculated using INSeq/Tn-Seq statistical methods."}
),
'migrate-dbs-safely': (
['--migrate-dbs-safely'],
{'required': False,
'action': 'store_true',
'default': False,
'help': "If you chose this, anvi'o will first create a copy of your original database. If something "
"goes wrong, it will restore the original. If everything works, it will remove the old copy. "
"IF YOU HAVE DATABASES THAT ARE VERY LARGE OR IF YOU ARE MIGRATING MANY MANY OF THEM THIS "
"OPTION WILL ADD A HUGE I/O BURDEN ON YOUR SYSTEM. But still. Safety is safe."}
),
'migrate-dbs-quickly': (
['--migrate-dbs-quickly'],
{'required': False,
'action': 'store_true',
'default': False,
'help': "If you chose this, anvi'o will migrate your databases in place. It will be much faster (and arguably "
"more fun) than the safe option, but if something goes wrong, you will lose data. During the first "
"five years of anvi'o development not a single user lost data using our migration scripts as far as "
"we know. But there is always a first, and today might be your lucky day."}
),
'module-completion-threshold': (
['--module-completion-threshold'],
{'default': 0.75,
'metavar': 'NUM',
'type': float,
'help': "This threshold defines the point at which we consider a KEGG module to be 'complete' or "
"'present' in a given genome or bin. It is the fraction of steps that must be complete in "
" in order for the entire module to be marked complete. The default is %(default)g."}
),
'get-raw-data-as-json': (
['--get-raw-data-as-json'],
{'default': None,
'metavar': 'FILENAME_PREFIX',
'type': str,
             'help': "If you want the raw metabolism estimation data dictionary in JSON-format, provide a filename prefix to this argument. "
"The program will then output a file with the .json extension containing this data."}
),
'store-json-without-estimation': (
['--store-json-without-estimation'],
{'default': False,
'action': 'store_true',
'help': "This flag is used to control what is stored in the JSON-formatted metabolism data dictionary. When this flag is provided alongside the "
"--get-raw-data-as-json flag, the JSON file will be created without running metabolism estimation, and "
"that file will consequently include only information about KOfam hits and gene calls. The idea is that you can "
"then modify this file as you like and re-run this program using the flag --estimate-from-json."}
),
'estimate-from-json': (
['--estimate-from-json'],
{'default': None,
'metavar': 'FILE_PATH',
'type': str,
'help': "If you have a JSON file containing KOfam hits and gene call information from your contigs database "
"(such as a file produced using the --get-raw-data-as-json flag), you can provide that file to this flag "
"and KEGG metabolism estimates will be computed from the information within instead of from a contigs database."}
),
'enzymes-txt': (
['--enzymes-txt'],
{'default': None,
'metavar': 'FILE_PATH',
'type': str,
'help': "A tab-delimited file describing gene id, enzyme accession, functional annotation source for the enzyme, "
"and (optionally) coverage and detection values for the gene."}
),
'output-modes': (
['--output-modes'],
{'default': None,
'metavar': 'MODES',
'type': str,
'help': "Use this flag to indicate what information you want in the metabolism output files, by "
"providing a comma-separated list of output modes (each 'mode' you provide will result in a "
"different output file, all with the same prefix). To see a list of available output modes, "
"run this script with the flag --list-available-modes."}
),
'list-available-modes': (
['--list-available-modes'],
{'default': False,
'action': 'store_true',
'help': "Use this flag to see the available output modes and their descriptions."}
),
'custom-output-headers': (
['--custom-output-headers'],
{'default': None,
'metavar': 'HEADERS',
'type': str,
'help': "For use with the 'custom' output mode. Provide a comma-separated list of headers to include "
"in the output matrix. To see a list of available headers, run this script with the flag "
"--list-available-output-headers."}
),
'list-available-output-headers': (
['--list-available-output-headers'],
{'default': False,
'action': 'store_true',
'help': "Use this flag to see the available output headers."}
),
'keep-all-hits': (
['--keep-all-hits'],
{'default': False,
'action': 'store_true',
'help': "If you use this flag, anvi'o will not get rid of any raw HMM hits, even those that "
"are below the score threshold."}
),
'log-bitscores': (
['--log-bitscores'],
{'default': False,
'action': 'store_true',
'help': "Use this flag to generate a tab-delimited text file containing the bit scores "
"of every KOfam hit that is put in the contigs database."}
),
'heuristic-e-value': (
['-E', '--heuristic-e-value'],
{'default': 1.0e-5,
'metavar': 'FLOAT',
'type': float,
'help': "When considering hits that didn't quite make the bitscore cut-off for a gene, we "
"will only look at hits with e-values <= this number. (This is X.)"}
),
'heuristic-bitscore-fraction': (
['-H', '--heuristic-bitscore-fraction'],
{'default': 0.75,
'metavar': 'FLOAT',
'type': float,
'help': "When considering hits that didn't quite make the bitscore cut-off for a gene, we "
"will only look at hits with bitscores > the KEGG threshold * this number. (This is Y.) "
"It should be a fraction between 0 and 1 (inclusive)."}
),
'skip-bitscore-heuristic':(
['--skip-bitscore-heuristic'],
{'default': False,
'action': 'store_true',
'help': "If you just want annotations from KOfam hits that are above the KEGG bitscore "
"threshold, use this flag to skip the mumbo-jumbo we do here to relax those thresholds. "}
),
'include-metadata': (
['--include-metadata'],
{'default': False,
'action': 'store_true',
'help': "When asking for --matrix-format, you can use this flag to make sure the output matrix files include "
"columns with metadata for each KEGG Module or KO (like the module name and category for example) before "
"the sample columns."}
),
'only-complete': (
['--only-complete'],
{'default': False,
'action': 'store_true',
'help': "Choose this flag if you want only modules over the module completeness threshold to be included "
"in any output files."}
),
'include-zeros': (
['--include-zeros'],
{'default': False,
'action': 'store_true',
'help': "If you use this flag, output files will include modules with 0 percent completeness score, "
"and in the case of --matrix-format, output matrices will include rows with 0s in every sample. "}
),
'module-specific-matrices': (
['--module-specific-matrices'],
{'default': None,
'metavar': 'MODULE_LIST',
'help': "Provide a comma-separated list of module numbers to this parameter, and then you will get "
"a KO hits matrix for each module in the list."}
),
'no-comments': (
['--no-comments'],
{'default': False,
'action': 'store_true',
'help': "If you are requesting --module-specific-matrices but you don't want those matrices to include "
"comment lines in them (for example, perhaps you want to use them for clustering), you can use "
"this flag. Otherwise, by default these specific matrices will include comments delineating "
"which KOs are in each step of the module."}
),
'modules-txt': (
['-M', '--modules-txt'],
{'default': None,
'metavar': 'TEXT_FILE',
'help': "A tab-delimited text file specifying module completeness in every genome/MAG/sample "
"that you are interested in. The best way to get this file is to run `anvi-estimate-metabolism "
"--output-modes modules` on your samples of interest. Trust us."}
),
'groups-txt': (
['-G', '--groups-txt'],
{'default': None,
'metavar': 'TEXT_FILE',
'help': "A tab-delimited text file specifying which group each item belongs to. "
"Depending on the context, items here may be individual samples or genomes. "
"The first column must contain item names matching to those that are in your "
"input data. A different column should have the header 'group' and contain the "
"group name for each item. Each item should be associated with a single "
"group. It is always a good idea to define groups using single words without any fancy "
"characters. For instance, `HIGH_TEMPERATURE` or `LOW_FITNESS` are good group names. "
"`my group #1` or `IS-THIS-OK?`, are not good group names."}
),
'sample-header': (
['--sample-header'],
{'default': 'db_name',
'help': "The header of the column containing your sample names in the modules-txt input file. By "
"default this is 'db_name' because we are assuming you got your modules mode output by "
"running `anvi-estimate-metabolism` in multi mode (on multiple genomes or metagenomes), but "
"just in case you got it a different way, this is how you can tell anvi'o which column to "
"look at. The values in this column should correspond to those in the 'sample' column in "
"the groups-txt input file."}
),
'trnaseq-fasta': (
['-f', '--trnaseq-fasta'],
{'metavar': 'FASTA',
'required': False,
'help': "The FASTA file containing merged (quality-controlled) tRNA-seq reads from a sample. "
"We recommend generating this file via `anvi-run-workflow -w trnaseq` "
"to ensure proper merging of read pairs that may be partially or fully overlapping, "
"and to automatically produce anvi'o-compliant simple deflines. "
"If there is a problem, anvi'o will gracefully complain about it."}
),
'treatment': (
['--treatment'],
{'default': 'untreated',
'help': "The type of treatment applied during tRNA-seq sample preparation. "
"The values which are currently known to anvi'o are \"untreated\" and \"demethylase\", "
"as tRNA-seq samples are commonly split for these treatments. "
"Anvi'o will warn you if you do not choose one of these known options, but it will not affect data processing. "
"Treatment type is stored for further reference in the output tRNA-seq database, "
"and can be used in anvi-merge-trnaseq to affect which nucleotides are called at predicted modification sites in tRNA seed sequences."}
),
'write-checkpoints': (
['--write-checkpoints'],
{'default': False,
'action': 'store_true',
'help': "Use this flag to write pickle files of intermediate results at key points in anvi-trnaseq. "
"If anvi'o crashes for some reason, the argument, --load-checkpoint, with the associated checkpoint name "
"can be used to restart the program from the given checkpoint. "
"This can be useful for saving time if anvi'o crashes "
"or in comparing the results of different advanced program parameterizations "
"involved in later stages of the analytical pipeline after the checkpoint, "
"such as --min-trna-fragment-size and --agglomeration-max-mismatch-freq. "
"This flag will overwrite existing intermediate files in the output directory as needed."}
),
'load-checkpoint': (
['--load-checkpoint'],
{'choices': constants.TRNASEQ_CHECKPOINTS,
'help': "This option restarts `anvi-trnaseq` from the specified checkpoint. "
"It can be useful for saving time if anvi'o crashed after the checkpoint. "
"It can also be useful in comparing the results of different advanced program parameterizations "
"that are only involved in stages of the analytical pipeline after the checkpoint. "
"`anvi-trnaseq` must previously have been run with the flag, "
"`--write-checkpoints`, so that intermediate checkpoint files were generated. "
"Checkpoint \"profile\" restarts after tRNA profiling. "
"\"normalize\" restarts after sequence trimming and normalization. "
"\"map_fragments\" restarts after non-3' fragments have been mapped to normalized tRNA sequences. "
"\"substitutions\" restarts after potential modification-induced substitutions have been found. "
"\"indels\" restarts after modification-induced indels have been found, the last step in tRNA identification. "
"If `--write-checkpoints` is used in conjunction with `--load-checkpoint` "
"then all existing intermediate files from checkpoints following the one being loaded will be overwritten."}
),
'feature-param-file': (
['--feature-param-file'],
{'metavar': 'FILE',
'type': str,
'help': "A .ini file can be provided to set tRNA feature parameters "
"used in de novo profiling/identification of tRNA sequences from the 3' end. "
"Generate the default file with the command, `anvi-trnaseq --default-feature-param-file`. "
"Dashes in the default file show parameters that cannot be changed, "
"because they do not exist or are set in stone. "
"For instance, the program only detects base pairing in stems, "
"so only stem features are parameterized with a maximum allowed number of unpaired nucleotides, "
"while every other feature has a dash in the \"Number allowed unpaired\" column. "
"Two quotes in the default file show parameters that are not currently set. "
"To lift a constraint, a parameter value can be replaced by \"\". "
"For instance, the conserved purine at D loop/position 21 indicated by the value, 0,R, "
"can be replaced by \"\" to prevent the program from seeking a conserved nucleotide there. "
"Conserved nucleotides in a feature are set by pairs of zero-based indices and nucleotide symbols. "
"The index indicates the conserved position in the feature, relative to the 5' end of the feature. "
"The nucleotide symbol can be A, C, G, T (U in cDNA), R (purine), or Y (pyrimidine). "
"The index is separated from the symbol by a comma. "
"Multiple conserved positions in a feature are separted by a semicolon. "
"Feature profiling of a sequence halts when the number of allowed unconserved nucleotides in a feature "
"or the number of allowed unpaired positions in a stem is exceeded. "
"The default allowed number of unconserved nucleotides in the D loop, for example, is 1, "
"so 4 of the 5 conserved positions must be found for the D loop to be positively identified. "
"By default, 1 position is allowed to be unpaired (no Watson-Crick or G-T wobble base pair) "
"in each of the 4 stems; the user could, for instance, "
"lift this constraint on the acceptor stem by changing the value from 1 to \"\". "
"There are 3 variable-length sections of tRNA. The user could, for example, "
"change the allowed lengths of the V loop from a discontinuous range, \"4-5,9-23\", to a continuous range, \"4-23\"."}
),
'threeprime-termini': (
['--threeprime-termini'],
{'default': 'CCA,CC,C,CCAN,CCANN',
'type': str,
'help': "Termini represent the subsequences (in the 5'->3' orientation) "
"to expect at the 3' end of a tRNA read adjacent to the discriminator nucleotide. "
"tRNA feature profiling from the 3' end seeks a valid terminus prior to the discriminator and more 5' features. "
"3' terminal sequences can include the nucleotides, A, C, G, and T, and N, symbolizing any nucleotide. "
"A single underscore, \"_\", can be included in lieu of a sequence, "
"symbolizing the absence of a terminus such that the tRNA feature profile may end with the discriminator. "
"If \"_\" is not included, tRNA sequences ending in the discriminator will still be sought as *fragments* of profiled tRNA. "
"The order of sequences in the argument is the order of consideration in profiling. "
"For example, if CCA is the first 3' terminus considered, "
"and it produces a complete profile with no unconserved or unpaired nucleotides, then the other possible termini are not considered. "
"Other termini are only considered with the possibility of \"improvement\" in the feature profile."}
),
'min-length-long-fiveprime': (
['--min-length-long-fiveprime'],
{'default': 4,
'metavar': 'INT',
'type': int,
'help': "tRNA reads often extend beyond the 5' end of a mature tRNA sequence. "
"This can be biological in origin when the read is from pre-tRNA; artifactual in origin "
"when the reverse transcriptase runs off the end of the template, adding a small number ofs random bases; "
"or artifactual when the read is a chimera of tRNA at the 3' end and another, potentially non-tRNA, transcript at the 5' end. "
"Longer 5' extensions are more likely to be biological than artifactual due to the exclusion of runoff bases. "
"This parameter sets the minimum length of 5' sequence extensions "
"that are recorded in the tRNA-seq database output for further analysis."}
),
'min-trna-fragment-size': (
['--min-trna-fragment-size'],
{'default': 25,
'metavar': 'INT',
'type': int,
'help': "Anvi'o profiles a sequence as tRNA by identifying tRNA features from the 3' end of the sequence. "
"tRNA-seq datasets can include a significant number of tRNA fragments "
"that are not from the 3' end of the sequence ending in a recognized terminus, e.g., CCA. "
"These \"interior\" and 5' fragments can be of significant biological interest. "
"Fragments are identified by mapping unprofiled reads to profiled tRNAs that have their 3' termini trimmed off. "
"This parameter sets the minimum length of unprofiled reads searched in this manner. "
"The choice of %(default)d as the default value is motivated by considerations "
"of false positive matches and computational performance with a shorter minimum sequence length. "
"Since unprofiled reads are mapped to every unique profiled tRNA sequence, "
"a shorter minimum sequence length can make mapping take a very long time "
"and return too many alignments to store in memory for datasets of millions of reads. "
"Pay attention to python memory usage if you adjust this parameter downwards."}
),
'agglomeration-max-mismatch-freq': (
['--agglomeration-max-mismatch-freq'],
{'default': 0.03,
'metavar': 'FLOAT',
'type': float,
'help': "Anvi'o finds potential tRNA modifications by first agglomerating sequences "
"differing from one or more other sequences in the cluster by mismatches at a certain fraction of nucleotides. "
"This parameter sets the maximum mismatch fraction that is allowed, by default 0.03. "
"The value of this parameter is rounded to the nearest hundredth. "
"The default approximates 2/71, representing 2 mismatches in a full-length tRNA of length 74, not 71, "
"as 3' sequence variants, including the canonical 3'-CCA, are trimmed off prior to sequences being agglomerated. "
"(Average non-mitochondrial tRNAs range in length from 74-95.) "
"For example, consider 3 trimmed sequences of length 71 -- A, B and C -- and 1 sequence of length 65, D. "
"If A differs from B by a substitution at position 1 when aligned (mismatch frequency of 0.014), "
"and C differs from B at positions 10 and 20 (mismatch frequency of 0.028), "
"such that C differs from A by 3 substitutions (mismatch frequency of 0.042), "
"then A, B, and C will still agglomerate into a single cluster, "
"as each differs by no more than 2 substitutions from *some other sequence* in the cluster. "
"In contrast, sequence D differs from B at positions 30 and 40 (mismatch frequency of 0.031), "
"exceeding the 0.03 limit needed to agglomerate. "
"D forms its own cluster and is not consolidated into a single modified sequence with the others."}
),
'max-indel-freq': (
['--max-indel-freq'],
{'default': 0.05,
'metavar': 'FLOAT',
'type': float,
'help': "The maximum indel frequency constrains the number and length of modification-induced indels that can be found. "
"The value of this parameter is rounded to the nearest hundredth. "
"Anvi'o identifies tRNAs with potential modification-induced substitutions before finding indels. "
"tRNAs with substitutions are aligned with other sequences to find sequences differing only by indels. "
"The default parameter value of 0.05 allows 1 indel of length 3 to be found in a modified sequence of length 71. "
"(Modified sequences have the canonical 3'-CCA trimmed off, "
"so a sequence of length 71 represents the low end of the non-mitochondrial tRNA length range of 74-95.) "
"The default equivalently allows 2 indels of lengths 1 and 2 or 3 indels of length 1 in a sequence of length 71. "
"An indel of length 4 would result in a frequency of 0.056 and so would not be considered."}
),
'left-indel-buffer': (
['--left-indel-buffer'],
{'default': 3,
'metavar': 'INT',
'type': int,
'help': "This parameter sets the distance an indel must lie from the left end of a sequence alignment "
"in the search for modification-induced indels."
"The default buffer of 3 matches was chosen to prevent nontemplated and variant nucleotides "
"at the 5' end of tRNA reads from being mistakenly identified as indels."}
),
'right-indel-buffer': (
['--right-indel-buffer'],
{'default': 3,
'metavar': 'INT',
'type': int,
'help': "This parameter sets the distance an indel must lie from the right end of a sequence alignment "
"in the search for modification-induced indels. "
"The default buffer of 3 matches was chosen to prevent variant nucleotides "
"at the 3' end of tRNA reads from being mistakenly identified as indels."}
),
'skip-fasta-check': (
['--skip-fasta-check'],
{'default': False,
'action': 'store_true',
'help': "Don't check the input FASTA file for such things as proper defline formatting to speed things up."}
),
'profiling-chunk-size': (
['--profiling-chunk-size'],
{'default': 100000,
'metavar': 'INT',
'type': int,
'help': "Anvi'o manages memory consumption during tRNA feature profiling by chunking the unique input sequences. "
"This parameter sets the maximum number of sequences in each chunk. "
"Adjustment of this parameter has little effect on speed."}
),
'alignment-target-chunk-size': (
['--alignment-target-chunk-size'],
{'default': 25000,
'metavar': 'INT',
'type': int,
'help': "Anvi'o sequence alignment manages memory consumption by chunking the list of alignment targets, "
"so that queries are aligned to the first chunk of targets, then the second chunk, and so on. "
"This parameter sets the maximum number of target sequences in each chunk. "
"Memory management becomes important when aligning short queries to a large number of targets, "
"which involves searching queries against a massive number of k-mers "
"(equal in length to the shortest query) that have been extracted from targets. "
"Adjust this parameter downward if your system runs out of memory during alignment; "
"adjust this parameter upward to speed up alignment if you find that you are not memory-limited. "
"Ideally, we would set this parameter using a heuristic function "
"parameterized with the numbers and lengths of query and target sequences..."}
),
'default-feature-param-file': (
['--default-feature-param-file'],
{'metavar': 'OUTPUT_FILENAME',
'type': str,
'help': "Writes a tab-delimited .ini file containing default tRNA feature parameterizations "
"used in de novo profiling/identification of tRNA sequences from the 3' end. "
"Parameters can be modified by the user and the file fed back into anvi-trnaseq "
"through the --feature-param-file argument, the help description of which explains the file format."}
),
'print-default-feature-params': (
['--print-default-feature-params'],
{'default': False,
'action': 'store_true',
'help': "Prints to standard output a nicely formatted table of the default tRNA feature parameterizations "
"(which can be written to a tab-delimited .ini file by the option, --default-feature-param-file)."}
),
'max-reported-trna-seeds': (
['--max-reported-trna-seeds'],
{'default': 10000,
'metavar': 'INT',
'type': int,
'help': "This parameter limits the number of tRNA seed sequences reported in the contigs database, "
"as anvi-interactive can have trouble displaying large numbers of items. "
"To remove the limit on reported seeds, specify a value of -1."}
),
'feature-threshold': (
['--feature-threshold'],
{'default': 'anticodon_loop',
'type': str,
'choices': constants.TRNA_SEED_FEATURE_THRESHOLD_CHOICES,
'help': "This option prevents formation of tRNA seed sequences from input sequences "
"that did not reach the threshold feature in anvi-trnaseq profiling from the 3' end. "
"The more stringent the threshold, the fewer spurious seeds are formed "
"from rare chimeric and other inaccurate tRNA predictions. "
"The most stringent threshold is \"acceptor_stem\", the most 5' feature, "
"resulting in seeds formed only from tRNAs with a complete feature set "
"(with the exception of the extra 5'-G in tRNA-His)."}
),
'preferred-treatment': (
['--preferred-treatment'],
{'type': str,
'help': "tRNA-seq databases recorded as employing the preferred treatment are given preference "
"in setting nucleotides at predicted modification positions in tRNA seed sequences. "
"By default, equal preference is given to all of the input databases. "
"The reason for this parameter is that paired untreated and enzymatically treated splits "
"can assist in the identification of underlying modified nucleotides. "
"For example, splits treated with a demethylase can be compared to untreated splits "
"to probe which nucleotides are methylated."}
),
'nonspecific-output': (
['--nonspecific-output'],
{'default': 'nonspecific_db,combined_db',
'type': str,
'help': "A significant fraction of tRNA-seq reads can be from tRNA fragments. "
"These can be real biomolecules or artifactual 3' fragments "
"produced as a result of incomplete reverse transcription of the tRNA template to cDNA. "
"Rather than randomly assigning fragments to a single target, "
"as in metagenomic read recruitment by Bowtie, "
"anvi-trnaseq tracks all of the longer sequences containing each fragment. "
"This results in two categories of coverage: "
"'specific' for reads that are only found in one seed "
"and 'nonspecific' for reads found in multiple seeds. "
"Specific coverages are always reported in a separate profile database. "
"Nonspecific coverages can be reported in three types of database, as specified by this parameter. "
"'nonspecific_db' produces a profile database only containing nonspecific coverages. "
"'combined_db' produces a database containing separate specific and nonspecific layers. "
"'summed_db' produces a database containing summed specific and nonspecific coverages. "
"To produce multiple types of databases, separate the database types with commas (no spaces). "
"For example, all three databases are produced with the argument, 'nonspecific_db,combined_db,summed_db'."}
),
'min-variation': (
['--min-variation'],
{'default': 0.01,
'metavar': 'FLOAT',
'type': float,
'help': "When more than 2 nucleotides are found at a position in a tRNA, "
"a modification-induced mutation (substitution) is considered rather than a single nucleotide variant. "
"This parameter sets a key criterion for the prediction of a modification, "
"the minimum fraction of specific coverage at a position with more than 2 nucleotides "
"that must be contributed by nucleotides beside the most abundant nucleotide. "
"For example, if A, C, and G are found at position 20 of a tRNA, "
"and A is represented by 95 reads, C by 3 reads, and G by 1 read, then with a parameter value of 0.05, "
"the site would be 1 C, G, or T short of meeting the threshold for prediction of a modification."}
),
'min-third-fourth-nt': (
['--min-third-fourth-nt'],
{'default': 0.002,
'metavar': 'FLOAT',
'type': float,
'help': "This parameter sets a key criterion for the prediction of a modification, "
"the minimum fraction of specific coverage at a position with more than 2 nucleotides "
"that must be contributed by nucleotides beside the 2 most abundant nucleotides. "
"Unlike --min-variation, this criterion only needs to be met for 1 sample "
"to permit modification of the position in all samples of the experiment. "
"For example, consider an experiment with 2 samples and a parameter value of 0.01. "
"In Sample 1, A, C, and G are found at position 20 of a tRNA, "
"and A is represented by 95 reads, C by 4 reads, and G by 1 read. "
"The default parameter value of 0.01 is exactly met at the position thanks to G. "
"In Sample 2, A, C, G, and T are found at position 20 of the same tRNA seed, "
"and A is represented by 1000 reads, C by 100 reads, G by 2 reads, and T by 2 reads. "
"The third and fourth nucleotides don't meet the coverage threshold of 0.01, "
"but this is irrelevant for calling the modification, since Sample 1 met the criterion. "
"There is an important consideration due to the way this threshold is currently imposed. "
"Potential modification sites that do not meet the threshold "
"are not treated like single nucleotide variants in anvi-trnaseq: "
"they do not cause the seed sequence to be split "
"such that no seed contains a variant that was not deemed to be a modification. "
"Rather, candidate modification positions that do not meet this threshold "
"are retained in the seed BUT NOT REPORTED. "
"Therefore, we recommend rerunning this command with a parameter value of 0 "
"to inspect seeds for undisplayed variants (possible SNVs) "
"with a low level of third and fourth nucleotides."}
),
'min-indel-fraction': (
['--min-indel-fraction'],
{'default': 0.001,
'metavar': 'FLOAT',
'type': float,
'help': "This parameter controls which indels are reported in the tRNA-seq profile database. "
"Coverage of an indel in a sample must meet the minimum fraction of specific coverage. "
"Indel coverages are calculated separately for specific, nonspecific, and summed coverages."}
),
'specific-profile-db': (
['--specific-profile-db', '-s'],
{'metavar': 'PROFILE_DB',
'required': True,
'help': "The path to an anvi'o profile database containing specific coverage information on tRNA seeds. "
"`anvi-merge-trnaseq` generates a specific profile database from a tRNA-seq experiment."}
),
'nonspecific-profile-db': (
['--nonspecific-profile-db', '-n'],
{'metavar': 'PROFILE_DB',
'required': False,
'help': "The path to an anvi'o profile database containing nonspecific coverage information on tRNA seeds. "
"`anvi-merge-trnaseq` optionally generates a nonspecific profile database from a tRNA-seq experiment."}
),
'seeds-specific-txt': (
['--seeds-specific-txt', '-s'],
{'metavar': 'TEXT_FILE',
'required': True,
'help': "A tab-delimited text file containing data on tRNA seeds including specific coverages. "
"`anvi-tabulate-trnaseq` generates this file from anvi'o tRNA-seq databases."}
),
'seeds-nonspecific-txt': (
['--seeds-nonspecific-txt', '-n'],
{'metavar': 'TEXT_FILE',
'required': False,
'help': "A tab-delimited text file containing data on tRNA seeds including nonspecific coverages. "
"`anvi-tabulate-trnaseq` generates this file from anvi'o tRNA-seq databases."}
),
'modifications-txt': (
['--modifications-txt', '-m'],
{'metavar': 'TEXT_FILE',
'required': True,
'help': "A tab-delimited text file containing modification data on tRNA seeds. "
"`anvi-tabulate-trnaseq` generates this file from anvi'o tRNA-seq databases."}
),
'stats-to-summarize': (
['--stats-to-summarize', '-S'],
{'default': None,
'metavar': 'STATS',
'type': str,
'help': "Use this flag to indicate which statistics you want summarized, as "
"a comma-separated list. The default stats are 'detection' and "
"'mean_coverage_Q2Q3'. To see a list of available stats, use this flag "
"and provide an absolutely ridiculous string after it (we suggest 'cattywampus', but you do you)."}
)
}
# Two helper functions that work with the dictionary above.
def A(param_id, exclude_param=None):
    """Return the list of argument flags registered for `param_id` in `D`.

    If `exclude_param` is given, a filtered copy of the flag list is returned
    with that flag removed; otherwise the original list object from `D` is
    returned unchanged.
    """
    flags = D[param_id][0]

    if not exclude_param:
        return flags

    return [flag for flag in flags if flag != exclude_param]
def K(param_id, params_dict=None):
    """Return a deep copy of the kwargs dict for `param_id`, with overrides applied.

    Parameters
    ==========
    param_id : str
        Key into the module-level parameter dictionary `D`.
    params_dict : dict, optional
        Key/value overrides merged on top of the defaults. A deep copy of the
        defaults in `D` is taken first, so neither `D` nor the caller's dict
        is ever mutated.
    """
    # A `None` sentinel replaces the original mutable `{}` default, avoiding
    # Python's shared-mutable-default pitfall (the same dict object would be
    # reused across every call).
    if params_dict is None:
        params_dict = {}

    kwargs = copy.deepcopy(D[param_id][1])
    kwargs.update(params_dict)

    return kwargs
# The rest of this file is composed of code that responds to '-v' or '--version' calls from clients,
# and provides access to the database version numbers for all anvi'o modules.
import anvio.tables as t
from anvio.terminal import Run
# Shared terminal reporter used for version output below.
run = Run()
def set_version():
    """Collect anvi'o program and database version identifiers as one tuple.

    The order of the returned tuple is significant: it is unpacked positionally
    into the module-level dunder names at the bottom of this file.
    """
    # NOTE(review): `genomes_storage_vesion` mirrors an apparently misspelled
    # attribute name in anvio.tables -- confirm before "fixing" the spelling here.
    versions = (anvio_version,
                anvio_codename,
                t.contigs_db_version,
                t.pan_db_version,
                t.profile_db_version,
                t.genes_db_version,
                t.auxiliary_data_version,
                t.genomes_storage_vesion,
                t.structure_db_version,
                t.metabolic_modules_db_version,
                t.trnaseq_db_version)

    return versions
def get_version_tuples():
    """Return (label, version string) pairs for every anvi'o version number."""
    anvio_label = "%s (v%s)" % (__codename__, __version__)

    return [
        ("Anvi'o version", anvio_label),
        ("Profile DB version", __profile__version__),
        ("Contigs DB version", __contigs__version__),
        ("Genes DB version", __genes__version__),
        ("Auxiliary data storage version", __auxiliary_data_version__),
        ("Pan DB version", __pan__version__),
        ("Genome data storage version", __genomes_storage_version__),
        ("Structure DB version", __structure__version__),
        ("KEGG Modules DB version", __kegg_modules_version__),
        ("tRNA-seq DB version", __trnaseq__version__),
    ]
def print_version():
    """Pretty-print anvi'o and database version numbers via the `run` reporter."""
    # Header line gets a color and a trailing blank line.
    run.info("Anvi'o", "%s (v%s)" % (__codename__, __version__), mc='green', nl_after=1)

    for label, version in (("Profile database", __profile__version__),
                           ("Contigs database", __contigs__version__),
                           ("Pan database", __pan__version__),
                           ("Genome data storage", __genomes_storage_version__),
                           ("Auxiliary data storage", __auxiliary_data_version__),
                           ("Structure database", __structure__version__),
                           ("Metabolic modules database", __kegg_modules_version__)):
        run.info(label, version)

    # Final entry closes the report with a trailing blank line.
    run.info("tRNA-seq database", __trnaseq__version__, nl_after=1)
# Unpack all version identifiers once at import time. The positional order
# here must match the tuple returned by `set_version` above.
__version__, \
__codename__, \
__contigs__version__, \
__pan__version__, \
__profile__version__, \
__genes__version__, \
__auxiliary_data_version__, \
__genomes_storage_version__ , \
__structure__version__, \
__kegg_modules_version__, \
__trnaseq__version__ = set_version()
# Respond to '-v'/'--version' on any anvi'o program's command line and exit
# immediately, so clients get version info without running the program.
if '-v' in sys.argv or '--version' in sys.argv:
    print_version()
    sys.exit()
|
merenlab/anvio
|
anvio/__init__.py
|
Python
|
gpl-3.0
| 204,703
|
[
"BLAST",
"Bowtie"
] |
b3ce44d996edb7f664f63dfbdb401a3e77d1da2c271281a88a2f642b41f0e3d5
|
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
# Public API of this module.
__all__ = ['mvsdist',
           'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
           'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
           'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
           'fligner', 'mood', 'wilcoxon', 'median_test',
           'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
           ]
# Lightweight (statistic, minmax) result containers returned by `bayes_mvs`.
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.
    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
        Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.
    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard deviation,
        respectively.  Each result is a tuple of the form::
            (center, (lower, upper))
        with `center` the mean of the conditional pdf of the value given the
        data, and `(lower, upper)` a confidence interval, centered on the
        median, containing the estimate to a probability ``alpha``.
    Raises
    ------
    ValueError
        If ``alpha`` is not strictly between 0 and 1.
    See Also
    --------
    mvsdist
    Notes
    -----
    Each tuple of mean, variance, and standard deviation estimates represent
    the (center, (lower, upper)) with center the mean of the conditional pdf
    of the value given the data and (lower, upper) is a confidence interval
    centered on the median, containing the estimate to a probability
    ``alpha``.
    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses Jeffrey's prior for variance and std.
    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.
    Examples
    --------
    First a basic example to demonstrate the outputs:
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
    >>> var
    Variance(statistic=10.0, minmax=(3.1767242068607087, 24.459103821334018))
    >>> std
    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
    Now we generate some normally distributed random data, and get estimates of
    mean and standard deviation with 95% confidence intervals for those
    estimates:
    >>> n_samples = 100000
    >>> data = stats.norm.rvs(size=n_samples)
    >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
    >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
    >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
    ...            alpha=0.2, label=r'Estimated mean (95% limits)')
    >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
    >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
    ...            label=r'Estimated scale (95% limits)')
    >>> ax.legend(fontsize=10)
    >>> ax.set_xlim([-4, 4])
    >>> ax.set_ylim([0, 0.5])
    >>> plt.show()
    """
    # Validate `alpha` before doing any work: the original code called
    # `mvsdist(data)` first, wasting the distribution fits (and potentially
    # masking the alpha error behind a data-size error) when alpha is invalid.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)

    m, v, s = mvsdist(data)

    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))

    return m_res, v_res, s_res
def mvsdist(data):
    """
    'Frozen' distributions for mean, variance, and standard deviation of data.
    Parameters
    ----------
    data : array_like
        Input array.  Converted to 1-D using ravel.
        Requires 2 or more data-points.
    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data
    See Also
    --------
    bayes_mvs
    Notes
    -----
    The return values from ``bayes_mvs(data)`` is equivalent to
    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.
    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.mvsdist(data)
    >>> mean.mean()
    9.0
    >>> mean.interval(0.95)
    (6.6120585482655692, 11.387941451734431)
    >>> mean.std()
    1.1952286093343936
    """
    flat = ravel(data)
    count = len(flat)
    if count < 2:
        raise ValueError("Need at least 2 data-points.")

    sample_mean = flat.mean()
    sample_var = flat.var()

    if count > 1000:
        # Gaussian approximations are adequate for large sample sizes.
        mdist = distributions.norm(loc=sample_mean,
                                   scale=math.sqrt(sample_var / count))
        sdist = distributions.norm(loc=math.sqrt(sample_var),
                                   scale=math.sqrt(sample_var / (2. * count)))
        vdist = distributions.norm(loc=sample_var,
                                   scale=math.sqrt(2.0 / count) * sample_var)
    else:
        # Exact small-sample posteriors: Student's t for the mean,
        # generalized gamma for the std, inverse gamma for the variance.
        dof = count - 1
        half_ssd = count * sample_var / 2.
        shape = dof / 2.
        mdist = distributions.t(dof, loc=sample_mean,
                                scale=math.sqrt(sample_var / dof))
        sdist = distributions.gengamma(shape, -2, scale=math.sqrt(half_ssd))
        vdist = distributions.invgamma(shape, scale=half_ssd)

    return mdist, vdist, sdist
def kstat(data, n=2):
    r"""
    Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic k_n is the unique symmetric unbiased estimator of
    the nth cumulant kappa_n.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Order of the k-statistic.  Default is 2.

    Returns
    -------
    kstat : float
        The nth k-statistic; ``nan`` if the input contains nan.

    Raises
    ------
    ValueError
        If `n` is outside [1, 4] or the input is empty.

    See Also
    --------
    kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    References
    ----------
    http://mathworld.wolfram.com/k-Statistic.html
    http://mathworld.wolfram.com/Cumulant.html
    """
    if not 1 <= n <= 4:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    n = int(n)
    flat = ravel(data)
    N = flat.size
    # raise ValueError on empty input
    if N == 0:
        raise ValueError("Data input must not be empty")
    # on nan input, return nan without warning
    if np.isnan(np.sum(flat)):
        return np.nan
    # Power sums S_k = sum(x**k); index 0 is unused padding so that
    # S[k] matches the formulas below.
    S = [None] + [np.sum(flat**k, axis=0) for k in range(1, n + 1)]
    if n == 1:
        return S[1] * 1.0/N
    if n == 2:
        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
    if n == 3:
        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
    # n == 4 (the only remaining possibility)
    return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
             4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
            (N*(N-1.0)*(N-2.0)*(N-3.0)))
def kstatvar(data, n=2):
    r"""
    Returns an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2}, optional
        Order of the k-statistic whose variance is estimated.  Default is 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    Raises
    ------
    ValueError
        If `n` is not 1 or 2.

    See Also
    --------
    kstat: Returns the n-th k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.
    """
    flat = ravel(data)
    N = len(flat)
    if n == 1:
        # var(k1) = k2 / N
        return kstat(flat, n=2) * 1.0/N
    if n == 2:
        # var(k2) = (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
        second = kstat(flat, n=2)
        fourth = kstat(flat, n=4)
        return (2*N*second**2 + (N-1)*fourth) / (N*(N+1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
    """
    Filliben's approximation of the uniform order statistic medians.

    The medians of the extreme order statistics are exact; the interior
    medians are approximated by points spread evenly across a
    sub-interval of the unit interval.

    Parameters
    ----------
    n : int
        Sample size.

    Returns
    -------
    v : 1d float array
        Approximations of the order statistic medians.

    References
    ----------
    .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
           Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.

    Examples
    --------
    >>> from scipy.morestats import _calc_uniform_order_statistic_medians
    >>> _calc_uniform_order_statistic_medians(4)
    array([ 0.15910358,  0.38545246,  0.61454754,  0.84089642])
    """
    medians = np.zeros(n, dtype=np.float64)
    # Exact medians for the largest and smallest order statistics.
    medians[-1] = 0.5**(1.0 / n)
    medians[0] = 1 - medians[-1]
    # Evenly spaced approximation for the interior order statistics.
    ranks = np.arange(2, n)
    medians[1:-1] = (ranks - 0.3175) / (n + 0.365)
    return medians
def _parse_dist_kw(dist, enforce_subclass=True):
    """Parse `dist` keyword.

    Parameters
    ----------
    dist : str or stats.distributions instance.
        Several functions take `dist` as a keyword, hence this utility
        function.  A string is looked up in `distributions`.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance.
        It can sometimes be useful to set this keyword to False, if a function
        wants to accept objects that just look somewhat like such an instance
        (for example, they have a ``ppf`` method).

    Raises
    ------
    ValueError
        If `dist` is an unknown distribution name, or is neither a string
        nor an `rv_generic` instance while `enforce_subclass` is True.
    """
    if isinstance(dist, string_types):
        try:
            return getattr(distributions, dist)
        except AttributeError:
            raise ValueError("%s is not a valid distribution name" % dist)

    if not isinstance(dist, rv_generic) and enforce_subclass:
        raise ValueError("`dist` should be a stats.distributions instance or "
                         "a string with the name of such a distribution.")

    return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
    """Helper function to add axes labels and a title to stats plots.

    Parameters
    ----------
    plot : object
        A Matplotlib Axes instance, the `matplotlib.pyplot` module, or any
        object exposing a compatible labeling API.
    xlabel, ylabel, title : str
        Text for the x-axis label, the y-axis label and the title.
    """
    try:
        if hasattr(plot, 'set_title'):
            # Matplotlib Axes instance or something that looks like it
            plot.set_title(title)
            plot.set_xlabel(xlabel)
            plot.set_ylabel(ylabel)
        else:
            # matplotlib.pyplot module
            plot.title(title)
            plot.xlabel(xlabel)
            plot.ylabel(ylabel)
    except Exception:
        # Not an MPL object or something that looks (enough) like it.
        # Don't crash on adding labels or title.  Note: catch `Exception`
        # rather than a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default),
    optionally computes a best-fit line, and optionally draws the result
    using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus
        location and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name; default is 'norm'.
        Objects that look enough like a stats.distributions instance
        (i.e. they have a ``ppf`` method) are also accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data
        if True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit.  `plot` must
        have methods "plot" and "text" (`matplotlib.pyplot` or a Matplotlib
        Axes object both work).  Default is None (no plot is created).

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Theoretical quantiles (order statistic medians) and the sorted
        input values.  See the Notes of the original docstring for how
        `osm` is computed (Filliben's estimate).
    (slope, intercept, r) : tuple of floats, optional
        Result of the least-squares fit; `r` is the square root of the
        coefficient of determination.  Only returned when ``fit=True``.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `probplot`; use ``plt.show()`` or ``plt.savefig`` afterwards.
    `probplot` generates a probability plot, not a Q-Q or P-P plot.
    """
    x = np.asarray(x)
    # The fit is needed either for the return value or for plotting.
    _perform_fit = fit or (plot is not None)
    if x.size == 0:
        # Degenerate empty input: empty quantiles (and a nan fit).
        if _perform_fit:
            return (x, x), (np.nan, np.nan, 0.0)
        return x, x

    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    dist = _parse_dist_kw(dist, enforce_subclass=False)

    # Normalize `sparams` to a tuple of shape parameters.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)

    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)
    if _perform_fit:
        # Least-squares fit of the ordered responses on the theoretical
        # quantiles; `r` is the correlation coefficient of the fit.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)

        if plot is not None:
            plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
            _add_axis_labels_title(plot, xlabel='Quantiles',
                                   ylabel='Ordered Values',
                                   title='Probability Plot')

            # Annotate the plot with the R^2 of the fit.
            xlo, xhi = amin(osm), amax(osm)
            ylo, yhi = amin(x), amax(x)
            posx = xlo + 0.70 * (xhi - xlo)
            posy = ylo + 0.01 * (yhi - ylo)
            plot.text(posx, posy, "$R^2=%1.4f$" % r**2)

    if fit:
        return (osm, osr), (slope, intercept, r)
    else:
        return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """
    Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  ppcc_max returns the shape parameter that would maximize
    the probability plot correlation coefficient for the given data to a
    one-parameter family of distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a,b,c) with a < b < c.  If the bracket has two numbers
        (a, c) they are used as the starting interval for a downhill
        bracket search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted.  Default is ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See also
    --------
    ppcc_plot, probplot, boxcox

    Notes
    -----
    The brack keyword serves as a starting point which is useful in corner
    cases.  One can use a plot to obtain a rough visual estimate of the
    location of the maximum to start the search near it.

    References
    ----------
    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test
           for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = sort(x)

    def _neg_ppcc(shape, medians, yvals, ppf):
        # Returns 1 - r so that minimizing this quantity maximizes the
        # correlation r between the probability-plot axes.
        xvals = ppf(medians, shape)
        r, _ = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(_neg_ppcc, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """
    Calculate and optionally plot probability plot correlation coefficient.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  It cannot be used for distributions without shape
    parameters (like the normal distribution) or with multiple shape
    parameters.

    By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used.  A
    Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
    distributions via an approximately normal one, and is therefore
    particularly useful in practice.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b: scalar
        Lower and upper bounds of the shape parameter to use.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted.  Default is ``'tukeylambda'``.
    plot : object, optional
        If given, plots PPCC against the shape parameter.  `plot` must have
        methods "plot" and "text" (`matplotlib.pyplot` or a Matplotlib Axes
        object both work).  Default is None (no plot is created).
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `a` to `b`).

    Returns
    -------
    svals : ndarray
        The shape values for which `ppcc` was calculated.
    ppcc : ndarray
        The calculated probability plot correlation coefficient values.

    See also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda

    References
    ----------
    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")

    # Resolve string names up front so the plot title can show the
    # distribution's name.  Previously the title interpolated `dist`
    # directly, which rendered an object repr when a distribution
    # *instance* was passed in.
    dist = _parse_dist_kw(dist, enforce_subclass=False)

    svals = np.linspace(a, b, num=N)
    ppcc = np.empty_like(svals)
    for k, sval in enumerate(svals):
        _, r2 = probplot(x, sval, dist=dist, fit=True)
        ppcc[k] = r2[-1]

    if plot is not None:
        plot.plot(svals, ppcc, 'x')
        # Fall back to `dist` itself for duck-typed objects without `name`.
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' %
                                     getattr(dist, 'name', dist))

    return svals, ppcc
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation.  See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for.  If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D
        `data`, an array otherwise; ``nan`` for empty input.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transformed input data ``x``.
    """
    data = np.asarray(data)
    num = data.shape[0]
    if num == 0:
        return np.nan

    transformed = boxcox(data, lmb)
    # Mean of the transformed data along the first axis.
    transformed_mean = np.mean(transformed, axis=0)
    # Log-likelihood: a term from the Jacobian of the transform plus a
    # term from the (profiled) normal likelihood of the transformed data.
    result = (lmb - 1) * np.sum(np.log(data), axis=0)
    result -= num / 2.0 * np.log(
        np.sum((transformed - transformed_mean)**2. / num, axis=0))
    return result
def _boxcox_conf_interval(x, lmax, alpha):
    # Find the two lambdas for which
    #     boxcox_llf(lmbda, x) >= boxcox_llf(lmax, x) - 0.5*chi^2_alpha;1
    # holds with equality, by bracketing each root and applying brentq.
    fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
    target = boxcox_llf(lmax, x) - fac

    def rootfunc(lmbda, data, target):
        return boxcox_llf(lmbda, data) - target

    def _find_endpoint(start, step):
        # Walk away from `lmax` in `step` increments until the root
        # function changes sign (at most 500 steps).
        endpoint = start
        for _ in range(500):
            if rootfunc(endpoint, x, target) <= 0.0:
                return endpoint
            endpoint += step
        raise RuntimeError("Could not find endpoint.")

    # Positive side: bracket above lmax, then solve on [lmax, endpoint].
    upper = _find_endpoint(lmax + 0.5, 0.1)
    lmplus = optimize.brentq(rootfunc, lmax, upper, args=(x, target))

    # Negative side: bracket below lmax, then solve on [endpoint, lmax].
    lower = _find_endpoint(lmax - 0.5, -0.1)
    lmminus = optimize.brentq(rootfunc, lower, lmax, args=(x, target))

    return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
    r"""
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.  Should be 1-dimensional and strictly positive.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the
        log-likelihood function and return it as the second output argument.
    alpha : {None, float}, optional
        If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.
        Must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and ``alpha`` is not None, this returned
        tuple of floats represents the minimum and maximum confidence limits
        given ``alpha``.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda > 0
            log(x),                  for lmbda = 0

    `boxcox` requires the input data to be positive.  A shift parameter is
    not provided; adding a positive constant to `x` before calling `boxcox`
    is equivalent.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).
    """
    x = np.asarray(x)
    if x.size == 0:
        return x

    if any(x <= 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:
        # Single transformation with the user-supplied lambda.
        return special.boxcox(x, lmbda)

    # lmbda=None: pick the lambda that maximizes the log-likelihood.
    lmax = boxcox_normmax(x, method='mle')
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    # Also compute the confidence interval for lambda.
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
    """Compute optimal Box-Cox transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        The starting interval for a downhill bracket search with
        `optimize.brent`.  Note that this is in most cases not critical; the
        final result is allowed to be outside this bracket.
    method : str, optional
        The method to determine the optimal transform parameter (`boxcox`
        ``lmbda`` parameter).  Options are:

        'pearsonr' (default)
            Maximizes the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the expected values for ``y`` if `x` would
            be normally-distributed.
        'mle'
            Minimizes the log-likelihood `boxcox_llf`.  This is the method
            used in `boxcox`.
        'all'
            Use all optimization methods available, and return all results.
            Useful to compare different methods.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found.  An array instead of a scalar
        for ``method='all'``.

    Raises
    ------
    ValueError
        If `method` is not one of 'pearsonr', 'mle' or 'all'.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot
    """
    def _pearsonr(x, brack):
        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
        xvals = distributions.norm.ppf(osm_uniform)

        def _eval_pearsonr(lmbda, xvals, samps):
            # Returns 1 - r so that minimization maximizes the correlation
            # between the normal quantiles and the sorted transformed data.
            yvals = np.sort(boxcox(samps, lmbda))
            r, _ = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))

    def _mle(x, brack):
        def _eval_mle(lmb, data):
            # Negated log-likelihood, so brent minimization maximizes it.
            return -boxcox_llf(lmb, data)

        return optimize.brent(_eval_mle, brack=brack, args=(x,))

    def _all(x, brack):
        # Results of all single methods, in the order: pearsonr, mle.
        return np.array([_pearsonr(x, brack), _mle(x, brack)], dtype=float)

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    if method not in methods:
        raise ValueError("Method %s not recognized." % method)

    return methods[method](x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to
        `boxcox` for Box-Cox transformations.  These are also the limits of
        the horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
        when fitting the Box-Cox transformed input `x` against a normal
        distribution.

    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `probplot`.
    """
    x = np.asarray(x)
    if x.size == 0:
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = lmbdas * 0.0
    for i, val in enumerate(lmbdas):
        # Determine for each lmbda the correlation coefficient of
        # transformed x
        z = boxcox(x, lmbda=val)
        _, r2 = probplot(z, dist='norm', fit=True)
        ppcc[i] = r2[-1]

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        # Raw string for the xlabel: '\l' is an invalid escape sequence
        # (DeprecationWarning on Python 3.6+); the string value is unchanged.
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title='Box-Cox Normality Plot')

    return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
    """
    Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the
    data was drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.  Must contain at least 3 values.
    a : array_like, optional
        Array of internal parameters used in the calculation.  If these
        are not given, they will be computed internally.  If x has length
        n, then a must have length n/2.  Scheduled for removal.
    reta : bool, optional
        Whether or not to return the internally computed a values.  The
        default is False.  Scheduled for removal.

    Returns
    -------
    W : float
        The test statistic.
    p-value : float
        The p-value for the hypothesis test.
    a : array_like, optional
        If `reta` is True, then these are the internally computed "a"
        values that may be passed into this function on future calls.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is described in [4]_ but censoring parameters as
    described are not implemented.  For N > 5000 the W test statistic is
    accurate but the p-value may not be.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
           normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal
           of Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
    """
    if a is not None or reta:
        warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
                      "removed in version 0.18.0", FutureWarning)
    x = np.ravel(x)
    N = len(x)
    if N < 3:
        raise ValueError("Data must be at least length 3.")

    # Without user-supplied coefficients, have swilk compute them (init=0).
    if a is None:
        a = zeros(N, 'f')
        init = 0
    elif len(a) != N // 2:
        raise ValueError("len(a) must equal len(x)/2")
    else:
        init = 1

    y = sort(x)
    a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
    # ifault 0 and 2 are acceptable; anything else signals degenerate input.
    if ifault not in [0, 2]:
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if N > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")

    return (w, pw, a) if reta else (w, pw)
# Anderson-Darling critical-value tables used by `anderson` below.
# Normal/exponential: significance levels 15%, 10%, 5%, 2.5%, 1%.
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
#  Some Comparisons", Journal of the American Statistical
#  Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# Gumbel: significance levels 25%, 10%, 5%, 2.5%, 1%.
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
#  Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# Logistic: significance levels 25%, 10%, 5%, 2.5%, 1%, 0.5%.
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
#  on the Empirical Distribution Function.", Biometrika,
#  Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# Named result type returned by `anderson`.
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """
    Anderson-Darling test for data coming from a particular distribution
    The Anderson-Darling test is a modification of the Kolmogorov-
    Smirnov test `kstest` for the null hypothesis that a sample is
    drawn from a population that follows a particular distribution.
    For the Anderson-Darling test, the critical values depend on
    which distribution is being tested against. This function works
    for normal, exponential, logistic, or Gumbel (Extreme Value
    Type I) distributions.
    Parameters
    ----------
    x : array_like
        array of sample data
    dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
        the type of distribution to test against. The default is 'norm'
        and 'extreme1' is a synonym for 'gumbel'
    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic
    critical_values : list
        The critical values for this distribution
    significance_level : list
        The significance levels for the corresponding critical values
        in percents. The function returns critical values for a
        differing set of significance levels depending on the
        distribution that is being tested against.
    Notes
    -----
    Critical values provided are for the following significance levels:
    normal/exponential
        15%, 10%, 5%, 2.5%, 1%
    logistic
        25%, 10%, 5%, 2.5%, 1%, 0.5%
    Gumbel
        25%, 10%, 5%, 2.5%, 1%
    If A2 is larger than these critical values then for the corresponding
    significance level, the null hypothesis that the data come from the
    chosen distribution can be rejected.
    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
           Some Comparisons, Journal of the American Statistical Association,
           Vol. 69, pp. 730-737.
    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
           pp. 357-369.
    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
           Distribution, Biometrika, Vol. 64, pp. 583-588.
    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality , Technical Report No. 262,
           Department of Statistics, Stanford University, Stanford, CA.
    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
           pp. 591-595.
    """
    if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")
    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)
    if dist == 'norm':
        # Standardize with the sample mean and ddof=1 standard deviation,
        # then map the order statistics through the standard normal CDF.
        s = np.std(x, ddof=1, axis=0)
        w = (y - xbar) / s
        z = distributions.norm.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        # Finite-sample correction of the tabulated critical values
        # (Stephens 1974).
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        # Exponential scale parameter estimated by the sample mean.
        w = y / xbar
        z = distributions.expon.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        def rootfunc(ab, xj, N):
            # Estimating equations for the logistic location/scale fit;
            # both components are zero at the solution.
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)
        # Solve numerically, starting from the moment estimates.
        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
        w = (y - sol[0]) / sol[1]
        z = distributions.logistic.cdf(w)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    else:  # (dist == 'gumbel') or (dist == 'extreme1'):
        # Maximum-likelihood fit of the left-skewed Gumbel distribution.
        xbar, s = distributions.gumbel_l.fit(x)
        w = (y - xbar) / s
        z = distributions.gumbel_l.cdf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    # Anderson-Darling statistic from the ordered probability integral
    # transforms z[0] <= ... <= z[N-1].
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.
    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test. It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population. The
    critical values depend on the number of samples.
    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default
        (True) is the midrank test applicable to continuous and
        discrete populations. If False, the right side empirical
        distribution is used.
    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
    significance_level : float
        An approximate significance level at which the null hypothesis for the
        provided samples can be rejected.
    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.
    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test
    Notes
    -----
    [1]_ Defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete
    distributions, in which ties between samples may occur. The
    default of this routine is to compute the version based on the
    midrank empirical distribution function. This test is applicable
    to continuous and discrete data. If midrank is set to False, the
    right side empirical distribution is used for a test for discrete
    data. According to [1]_, the two discrete test statistics differ
    only slightly if a few collisions due to round-off errors occur in
    the test not adjusted for ties between samples.
    .. versionadded:: 0.14.0
    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(314159)
    The null hypothesis that the two random samples come from the same
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
    significance level of 3.1%:
    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(loc=0.5, size=30)])
    (2.4615796189876105,
      array([ 0.325,  1.226,  1.961,  2.718,  3.752]),
      0.03134990135800783)
    The null hypothesis cannot be rejected for three samples from an
    identical distribution. The approximate p-value (87%) has to be
    computed by extrapolation and may not be very accurate:
    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(size=30), np.random.normal(size=20)])
    (-0.73091722665244196,
      array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856]),
      0.8789283903979661)
    """
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # H, h and g are the auxiliary quantities of Scholz & Stephens (1987)
    # entering the null variance of A2kN.
    H = (1. / n).sum()
    # hs_cs[-1] + 1 equals sum_{i=1}^{N-1} 1/i (a harmonic number).
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    # g = sum_{i=1}^{N-2} sum_{j=i+1}^{N-1} 1/((N-i)*j), accumulated via the
    # reversed cumulative sums above.
    g = (hs_cs / arange(2, N)).sum()
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    # Null variance of A2kN (Scholz & Stephens 1987).
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Standardized statistic.
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
    critical = b0 + b1 / math.sqrt(m) + b2 / m
    # Quadratic interpolation in log(p) through the tabulated critical
    # values yields an approximate p-value for the observed statistic;
    # outside the tabulated range this becomes an extrapolation.
    pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
    if A2 < critical.min() or A2 > critical.max():
        warnings.warn("approximate p-value will be computed by extrapolation")
    p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters
    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.
    Parameters
    ----------
    x, y : array_like
        arrays of sample data
    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic
    pvalue : float
        The p-value of the hypothesis test
    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters
    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.
    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
           methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Ansari-Bradley scores: distance-from-the-nearer-end rank,
    # min(rank, N - rank + 1).
    symrank = amin(array((rank, N - rank + 1)), 0)
    # Test statistic: sum of the scores belonging to the x sample.
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        # Exact null distribution of AB from the compiled statlib routine:
        # a1 holds the frequencies of statistic values starting at astart.
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = np.sum(a1, axis=0)
        # Double the appropriate one-sided tail for a two-sided p-value.
        if ind < len(a1)/2.0:
            cind = int(ceil(ind))
            if ind == cind:
                pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[:cind], axis=0) / total
        else:
            find = int(floor(ind))
            if ind == floor(ind):
                pval = 2.0 * np.sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
        return AnsariResult(AB, min(1.0, pval))
    # otherwise compute normal approximation
    # Null mean and variance of AB depend on the parity of N.
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:   # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
    # Two-sided p-value from the normal approximation.
    z = (AB - mnAB) / sqrt(varAB)
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
    """
    Perform Bartlett's test for equal variances.
    The null hypothesis is that every input sample was drawn from a
    population with the same variance.  For samples from significantly
    non-normal populations, Levene's test `levene` is more robust.
    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data.  May be different lengths.
    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.
    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances
    Notes
    -----
    If any input sample is empty, ``(nan, nan)`` is returned.
    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
    """
    # An empty sample short-circuits the whole computation to NaN.
    if any(np.asanyarray(sample).size == 0 for sample in args):
        return BartlettResult(np.nan, np.nan)
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    Ni = np.array([len(sample) for sample in args], dtype=float)
    ssq = np.array([np.var(sample, ddof=1) for sample in args], dtype='d')
    Ntot = Ni.sum()
    # Pooled (within-group) variance estimate.
    spsq = ((Ni - 1) * ssq).sum() / (1.0 * (Ntot - k))
    numer = (Ntot * 1.0 - k) * log(spsq) - ((Ni - 1.0) * log(ssq)).sum()
    # Small-sample correction factor for the chi-squared approximation.
    denom = 1.0 + 1.0/(3 * (k - 1)) * ((1.0 / (Ni - 1.0)).sum() -
                                       1.0/(Ntot - k))
    T = numer / denom
    pval = distributions.chi2.sf(T, k - 1)  # survival function = 1 - cdf
    return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
    """
    Perform Levene test for equal variances.
    The null hypothesis is that all input samples come from populations
    with equal variances.  Levene's test is an alternative to Bartlett's
    test `bartlett` when the data depart significantly from normality.
    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample data, possibly with different lengths
    center : {'mean', 'median', 'trimmed'}, optional
        Which function of the data to use in the test.  The default
        is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.
    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the test.
    Notes
    -----
    'median' is recommended for skewed (non-normal) distributions,
    'mean' for symmetric moderate-tailed distributions and 'trimmed'
    for heavy-tailed distributions.
    References
    ----------
    .. [1] Levene, H. (1960). In Contributions to Probability and Statistics:
           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
           Stanford University Press, pp. 278-292.
    .. [2] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
           Statistical Association, 69, 364-367
    """
    # Keyword-only options, parsed by hand so *args stays free for samples.
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw == 'center':
            center = value
        elif kw == 'proportiontocut':
            proportiontocut = value
        else:
            raise TypeError("levene() got an unexpected keyword "
                            "argument '%s'" % kw)
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ('mean', 'median', 'trimmed'):
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")
    if center == 'trimmed':
        # Trim once up front; the center is then the mean of the trimmed data.
        args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
                     for arg in args)
        location = lambda a: np.mean(a, axis=0)
    elif center == 'mean':
        location = lambda a: np.mean(a, axis=0)
    else:
        location = lambda a: np.median(a, axis=0)
    Ni = asarray([len(arg) for arg in args], dtype=float)
    Yci = asarray([location(arg) for arg in args], dtype='d')
    Ntot = Ni.sum()
    # Absolute deviations of each observation from its group's center.
    Zij = [abs(asarray(arg) - Yci[i]) for i, arg in enumerate(args)]
    # Per-group means of the deviations and their grand (weighted) mean.
    Zbari = asarray([np.mean(dev, axis=0) for dev in Zij], dtype='d')
    Zbar = (Zbari * Ni).sum() / Ntot
    numer = (Ntot - k) * (Ni * (Zbari - Zbar)**2).sum()
    # Within-group variation of the deviations.
    dvar = sum(((dev - zb)**2).sum() for dev, zb in zip(Zij, Zbari))
    denom = (k - 1.0) * dvar
    W = numer / denom
    pval = distributions.f.sf(W, k - 1, Ntot - k)  # survival function = 1 - cdf
    return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.
    This is an exact, two-sided test of the null hypothesis
    that the probability of success in a Bernoulli experiment
    is `p`.
    Parameters
    ----------
    x : integer or array_like
        the number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : integer
        the number of trials.  This is ignored if x gives both the
        number of successes and failures
    p : float, optional
        The hypothesized probability of success.  0 <= p <= 1. The
        default value is p = 0.5
    alternative : {'two-sided', 'less', 'greater'}, optional
        Indicates the alternative hypothesis.  The default value is
        'two-sided'.
    Returns
    -------
    p-value : float
        The p-value of the hypothesis test
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Binomial_test
    """
    x = atleast_1d(x).astype(np.integer)
    if len(x) == 2:
        # x gives (successes, failures); derive the trial count from it.
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")
    if (p > 1.0) or (p < 0.0):
        raise ValueError("p must be in range [0,1]")
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    if alternative == 'less':
        pval = distributions.binom.cdf(x, n, p)
        return pval
    if alternative == 'greater':
        pval = distributions.binom.sf(x-1, n, p)
        return pval
    # if alternative was neither 'less' nor 'greater', then it's 'two-sided'
    # Two-sided p-value: sum the probabilities of all outcomes that are no
    # more likely than the observed one, allowing a small relative
    # tolerance `rerr` for floating-point ties in the pmf comparison.
    d = distributions.binom.pmf(x, n, p)
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        # y counts outcomes in the opposite (upper) tail at most as likely
        # as the observed count.
        i = np.arange(np.ceil(p * n), n+1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        # y counts outcomes in the opposite (lower) tail at most as likely
        # as the observed count.
        i = np.arange(np.floor(p*n) + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(y-1, n, p) +
                distributions.binom.sf(x-1, n, p))
    return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
    """
    Perform Fligner-Killeen test for equality of variance.
    Tests the null hypothesis that all input samples come from
    populations with equal variances; the Fligner-Killeen test is
    distribution free when the populations are identical.
    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data.  Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used in
        computing the test statistic.  The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.
    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.
    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances
    Notes
    -----
    Returns ``(nan, nan)`` if any input sample is empty.  As with
    `levene` there are three variants differing in the measure of
    central tendency used; see `levene` for guidance.
    References
    ----------
    .. [1] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free
           two-sample tests for scale. 'Journal of the American
           Statistical Association.' 71(353), 210-213.
    """
    # Empty samples short-circuit to NaN.
    if any(np.asanyarray(sample).size == 0 for sample in args):
        return FlignerResult(np.nan, np.nan)
    # Keyword-only options, parsed by hand so *args stays free for samples.
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw == 'center':
            center = value
        elif kw == 'proportiontocut':
            proportiontocut = value
        else:
            raise TypeError("fligner() got an unexpected keyword "
                            "argument '%s'" % kw)
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ('mean', 'median', 'trimmed'):
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")
    if center == 'trimmed':
        # Note: unlike `levene`, the data are trimmed without pre-sorting.
        args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
        location = lambda a: np.mean(a, axis=0)
    elif center == 'mean':
        location = lambda a: np.mean(a, axis=0)
    else:
        location = lambda a: np.median(a, axis=0)
    Ni = asarray([len(arg) for arg in args])
    Yci = asarray([location(arg) for arg in args])
    Ntot = Ni.sum()
    # Pool the absolute deviations from each sample's center; `g` holds
    # the end offset of each sample's run inside the pooled vector.
    deviations = [abs(asarray(arg) - Yci[i]) for i, arg in enumerate(args)]
    pooled = np.concatenate(deviations)
    g = list(np.cumsum([len(dev) for dev in deviations]))
    ranks = stats.rankdata(pooled)
    # Map the ranks to half-normal scores.
    a = distributions.norm.ppf(ranks / (2 * (Ntot + 1.0)) + 0.5)
    # Mean score per sample.
    Aibar = _apply_func(a, g, np.sum) / Ni
    anbar = np.mean(a, axis=0)
    varsq = np.var(a, axis=0, ddof=1)
    # Chi-squared statistic from the between-sample score variation.
    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(Xsq, k - 1)  # survival function = 1 - cdf
    return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
    """
    Perform Mood's test for equal scale parameters.
    Mood's two-sample test for scale parameters is a non-parametric
    test for the null hypothesis that two samples are drawn from the
    same distribution with the same scale parameter.
    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    axis : int, optional
        The axis along which the samples are tested.  `x` and `y` can be of
        different length along `axis`.
        If `axis` is None, `x` and `y` are flattened and the test is done on
        all values in the flattened arrays.
    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test.  For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.
    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances
    Notes
    -----
    The data are assumed to be drawn from probability distributions ``f(x)``
    and ``f(x/s) / s`` respectively, for some probability density function f.
    The null hypothesis is that ``s == 1``.
    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)``  and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x2 = np.random.randn(2, 45, 6, 7)
    >>> x1 = np.random.randn(2, 30, 6, 7)
    >>> z, p = stats.mood(x1, x2, axis=1)
    >>> p.shape
    (2, 6, 7)
    Find the number of points where the difference in scale is not significant:
    >>> (p > 0.1).sum()
    74
    Perform the test with different scales:
    >>> x1 = np.random.randn(2, 30)
    >>> x2 = np.random.randn(2, 35) * 10.0
    >>> stats.mood(x1, x2, axis=1)
    (array([-5.7178125 , -5.25342163]), array([  1.07904114e-08,   1.49299218e-07]))
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    if axis is None:
        # Flatten both inputs and test over all values.
        x = x.flatten()
        y = y.flatten()
        axis = 0
    # Determine shape of the result arrays
    res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
    if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
                                ax != axis])):
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")
    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")
    xy = np.concatenate((x, y), axis=axis)
    # Move the test axis to the front and collapse the remaining axes to one
    # dimension, so each column can be ranked independently below.
    if axis != 0:
        xy = np.rollaxis(xy, axis)
    xy = xy.reshape(xy.shape[0], -1)
    # Generalized to the n-dimensional case by adding the axis argument, and
    # using for loops, since rankdata is not vectorized.  For improving
    # performance consider vectorizing rankdata function.
    all_ranks = np.zeros_like(xy)
    for j in range(xy.shape[1]):
        all_ranks[:, j] = stats.rankdata(xy[:, j])
    # Ranks belonging to the x sample.
    Ri = all_ranks[:n]
    # Mood statistic: sum of squared deviations of the x-ranks from the
    # mean pooled rank (N + 1) / 2.
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
    # Approx stat.
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)
    # sf for right tail, cdf for left tail.  Factor 2 for two-sidedness
    z_pos = z > 0
    pval = np.zeros_like(z)
    pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
    pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
    if res_shape == ():
        # Return scalars, not 0-D arrays
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
    """
    Calculate the Wilcoxon signed-rank test.
    Tests the null hypothesis that two related paired samples come from
    the same distribution; in particular, that the distribution of the
    differences ``x - y`` is symmetric about zero.  It is the
    non-parametric analogue of the paired T-test.
    Parameters
    ----------
    x : array_like
        The first set of measurements.
    y : array_like, optional
        The second set of measurements.  If `y` is not given, `x` is
        taken to already be the differences between the two sets of
        measurements.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        Treatment of zero-differences: "pratt" keeps them in the
        ranking process (more conservative), "wilcox" discards them,
        and "zsplit" splits their ranks between the positive and
        negative sums.
    correction : bool, optional
        If True, apply a 0.5 continuity correction towards the mean
        value when computing the z-statistic.  Default is False.
    Returns
    -------
    statistic : float
        The sum of the ranks of the differences above or below zero,
        whichever is smaller.
    pvalue : float
        The two-sided p-value for the test.
    Notes
    -----
    The p-value relies on the normal approximation, so the samples
    should be large; a typical rule is to require that n > 20.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
    """
    if zero_method not in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method should be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")
    if y is None:
        diffs = x
    else:
        x, y = map(asarray, (x, y))
        if len(x) != len(y):
            raise ValueError('Unequal N in wilcoxon. Aborting.')
        diffs = x - y
    if zero_method == "wilcox":
        # Discard every zero difference before ranking.
        diffs = compress(np.not_equal(diffs, 0), diffs, axis=-1)
    n_obs = len(diffs)
    if n_obs < 10:
        warnings.warn("Warning: sample size too small for normal approximation.")
    ranks = stats.rankdata(abs(diffs))
    r_plus = np.sum((diffs > 0) * ranks, axis=0)
    r_minus = np.sum((diffs < 0) * ranks, axis=0)
    if zero_method == "zsplit":
        # Share the ranks of zero differences evenly between both sums.
        half_zero_ranks = np.sum((diffs == 0) * ranks, axis=0) / 2.
        r_plus += half_zero_ranks
        r_minus += half_zero_ranks
    T = min(r_plus, r_minus)
    # Null mean and (scaled) variance of T.
    mn = n_obs * (n_obs + 1.) * 0.25
    se = n_obs * (n_obs + 1.) * (2. * n_obs + 1.)
    if zero_method == "pratt":
        # Zero differences took part in the ranking but are excluded from
        # the tie correction below.
        ranks = ranks[diffs != 0]
    _, repnum = find_repeats(ranks)
    if repnum.size != 0:
        # Correction for repeated elements.
        se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
    se = sqrt(se / 24)
    # Optional continuity correction of 0.5 towards the mean.
    cont_corr = 0.5 * int(bool(correction)) * np.sign(T - mn)
    z = (T - mn - cont_corr) / se
    prob = 2. * distributions.norm.sf(abs(z))
    return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
    """
    Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples.  The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median.  The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test statistic
    and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples.  There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value.  The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table.  The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table.  Default is True.
    lambda_ : float or str, optional.
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead.  See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).

    Returns
    -------
    stat : float
        The test statistic.  The statistic that is returned is determined by
        `lambda_`.  The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table.  The shape of the table is (2, n), where
        n is the number of samples.  The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median.  The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    A biologist runs an experiment in which there are three groups of plants.
    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
    Each plant produces a number of seeds.  The seed counts for each group
    are::

        Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
        Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
        Group 3:  0  3  9 22 23 25 25 33 34 34 40 45 46 48 62 67 84

    The following code applies Mood's median test to these samples.

    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)

    The median is

    >>> med
    34.0

    and the contingency table is

    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])

    `p` is too large to conclude that the medians are not the same:

    >>> p
    0.12609082774093244

    The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
    `median_test`.

    >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
    >>> p
    0.12224779737117837

    The median occurs several times in the data, so we'll get a different
    result if, for example, ``ties="above"`` is used:

    >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
    >>> p
    0.063873276069553273

    >>> tbl
    array([[ 5, 11,  9],
           [11,  4,  8]])

    This example demonstrates that if the data set is not large and there
    are values equal to the median, the p-value can be sensitive to the
    choice of `ties`.

    """
    ties = kwds.pop('ties', 'below')
    correction = kwds.pop('correction', True)
    lambda_ = kwds.pop('lambda_', None)
    if len(kwds) > 0:
        # next(iter(...)) instead of kwds.keys()[0]: dict.keys() is a
        # non-indexable view on Python 3, and next(iter(...)) works on both
        # Python 2 and 3.
        bad_kwd = next(iter(kwds))
        raise TypeError("median_test() got an unexpected keyword "
                        "argument %r" % bad_kwd)
    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')
    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))
    data = [np.asarray(arg) for arg in args]
    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions.  All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))
    grand_median = np.median(np.concatenate(data))
    # Create the contingency table: row 0 counts values above the grand
    # median, row 1 counts values below; ties are assigned per `ties`.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal
    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would have
    # a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If `ties`
        # is "ignore", that would result in a column of zeros in `table`.  We
        # check for that case here.
        zero_cols = np.where((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)
    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
              "in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
    """Return a pdf estimated from standardized cumulants g1..g4 via a
    Gram-Charlier-style Hermite-polynomial expansion.

    If `g4` is not given it defaults to ``3*g2**2``.  The returned value is
    a function of x that evaluates the expansion times a Gaussian kernel.
    """
    if g4 is None:
        g4 = 3 * g2**2
    sigsq = 1.0 / g2
    sig = sqrt(sigsq)
    mu = g1 * sig**3.0
    # Hermite basis, rescaled so term k carries a 1/sig**k factor.
    herm = _hermnorm(13)
    for order in range(13):
        herm[order] /= sig**order
    # Sum the expansion terms (kept in the original order to preserve the
    # exact floating-point result).
    totp = (herm[0] - g1/6.0*herm[3] +
            g2/24.0*herm[4] + g1**2/72.0 * herm[6] -
            g3/120.0*herm[5] - g1*g2/144.0*herm[7] - g1**3.0/1296.0*herm[9] +
            g4/720*herm[6] + (g2**2/1152.0 + g1*g3/720)*herm[8] +
            g1**2 * g2/1728.0*herm[10] + g1**4.0 / 31104.0*herm[12])
    # Final normalization
    totp = totp / sqrt(2*pi) / sig

    def _pdf(x):
        xn = (x - mu) / sig
        return totp(xn) * exp(-xn**2 / 2.)
    return _pdf
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular mean range.  Default is 0.
    axis : int, optional
        Axis along which means are computed.  The default is to compute
        the mean of the flattened array.

    Returns
    -------
    circmean : float
        Circular mean.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    sin_sum = sin(ang).sum(axis=axis)
    cos_sum = cos(ang).sum(axis=axis)
    # Mean angle mapped back into the [low, high) range.
    res = arctan2(sin_sum, cos_sum)*(high - low)/2.0/pi + low
    # Where both sums are exactly zero the mean direction is undefined.
    undefined = (sin_sum == .0) * (cos_sum == .0)
    if undefined.ndim > 0:
        res[undefined] = np.nan
    return res
def circvar(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for circular variance range.  Default is 0.
    high : float or int, optional
        High boundary for circular variance range.  Default is ``2*pi``.
    axis : int, optional
        Axis along which variances are computed.  The default is to compute
        the variance of the flattened array.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    This uses a definition of circular variance that in the limit of small
    angles returns a number close to the 'linear' variance.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    sin_mean = sin(ang).mean(axis=axis)
    cos_mean = cos(ang).mean(axis=axis)
    # Length of the mean resultant vector; shorter => more spread.
    R = hypot(sin_mean, cos_mean)
    return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular standard deviation for samples assumed to be in the
    range [low to high].

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for circular standard deviation range.  Default is 0.
    high : float or int, optional
        High boundary for circular standard deviation range.
        Default is ``2*pi``.
    axis : int, optional
        Axis along which standard deviations are computed.  The default is
        to compute the standard deviation of the flattened array.

    Returns
    -------
    circstd : float
        Circular standard deviation.

    Notes
    -----
    This uses a definition of circular standard deviation that in the limit
    of small angles returns a number close to the 'linear' standard
    deviation.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    sin_mean = sin(ang).mean(axis=axis)
    cos_mean = cos(ang).mean(axis=axis)
    # Length of the mean resultant vector.
    R = hypot(sin_mean, cos_mean)
    return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/scipy/stats/morestats.py
|
Python
|
apache-2.0
| 94,567
|
[
"Gaussian"
] |
6a45eea4ded3b7435a56703ac27bc8022a745fbc6b6fb4ac6123f26498a44304
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib.utils2 import i18n
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import SHORT_MONS
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://iiddl.com'  # default site root; overridden per-addon via the '<name>-base_url' setting
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/', VIDEO_TYPES.TVSHOW: '/category/tv-shows/'}  # site category path per video type
EXCLUDE_LINKS = ['adf.ly', urlparse.urlparse(BASE_URL).hostname]  # link hosts never returned as stream sources
class Scraper(scraper.Scraper):
    """SALTS scraper for the 2DDL (iiddl.com) DDL/debrid link blog."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # Allow the site root to be overridden from the addon settings.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        """Video types this scraper can produce sources for."""
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        return '2DDL'

    def get_sources(self, video):
        """Scrape the post page for *video* and return a list of hoster dicts."""
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, require_debrid=True, cache_limit=.5)
            # Movie posts have one link section; TV posts repeat a
            # <strong>title</strong> ... <singlelink> section per episode.
            if video.video_type == VIDEO_TYPES.MOVIE:
                pattern = '<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
            else:
                pattern = '<hr\s*/>\s*<strong>(.*?)</strong>.*?<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
            for match in re.finditer(pattern, html, re.DOTALL):
                if video.video_type == VIDEO_TYPES.MOVIE:
                    links = match.group(1)
                    # Movie posts carry the release title in the page heading.
                    match = re.search('<h2>\s*<a[^>]+>(.*?)</a>', html)
                    if match:
                        title = match.group(1)
                    else:
                        title = ''
                else:
                    title, links = match.groups()
                for match in re.finditer('href="([^"]+)', links):
                    stream_url = match.group(1).lower()
                    if any(link in stream_url for link in EXCLUDE_LINKS): continue
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.blog_get_quality(video, title, host)
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
                    hosters.append(hoster)
        return hosters

    @classmethod
    def get_settings(cls):
        """Base settings plus a date-filter slider; subtitle check disabled."""
        settings = super(cls, cls).get_settings()
        settings = scraper_utils.disable_sub_check(settings)
        name = cls.get_name()
        settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="60" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
        return settings

    def _get_episode_url(self, show_url, video):
        """Walk the show's post pages looking for a post matching *video*.

        Follows 'nextpostslink' pagination and stops as soon as a post is
        older than the configured date filter.
        """
        force_title = scraper_utils.force_title(video)
        title_fallback = kodi.get_setting('title-fallback') == 'true'
        norm_title = scraper_utils.normalize_title(video.ep_title)
        page_url = [show_url]
        too_old = False
        while page_url and not too_old:
            url = urlparse.urljoin(self.base_url, page_url[0])
            html = self._http_get(url, require_debrid=True, cache_limit=1)
            posts = dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'})
            for post in posts:
                if self.__too_old(post):
                    too_old = True
                    break
                if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                    match = re.search('<a\s+href="([^"]+)[^>]+>(.*?)</a>', post)
                    if match:
                        url, title = match.groups()
                        if not force_title:
                            if scraper_utils.release_check(video, title, require_title=False):
                                return scraper_utils.pathify_url(url)
                        else:
                            # Fall back to matching the episode title text.
                            if title_fallback and norm_title:
                                match = re.search('</strong>(.*?)</p>', post)
                                if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                                    return scraper_utils.pathify_url(url)
            page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site and return result dicts (url/title/year)."""
        results = []
        search_url = urlparse.urljoin(self.base_url, '/search/')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, require_debrid=True, cache_limit=1)
        if video_type == VIDEO_TYPES.TVSHOW:
            seen_urls = {}
            for post in dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'}):
                if CATEGORIES[video_type] not in post: continue
                match = re.search('<span>\s*TAGS:\s*</span>\s*<a\s+href="([^"]+)[^>]+>([^<]+)', post, re.I)
                if match:
                    show_url, match_title = match.groups()
                    # De-duplicate shows that appear in multiple posts.
                    if show_url in seen_urls: continue
                    result = {'url': scraper_utils.pathify_url(show_url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
                    seen_urls[show_url] = result
                    results.append(result)
        elif video_type == VIDEO_TYPES.MOVIE:
            headings = re.findall('<h2>\s*<a\s+href="([^"]+)[^>]+>(.*?)</a>', html)
            posts = dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'})
            norm_title = scraper_utils.normalize_title(title)
            for heading, post in zip(headings, posts):
                if CATEGORIES[video_type] not in post or self.__too_old(post): continue
                post_url, post_title = heading
                meta = scraper_utils.parse_movie_link(post_title)
                full_title = '%s [%s] (%sp)' % (meta['title'], meta['extra'], meta['height'])
                match_year = meta['year']
                match_norm_title = scraper_utils.normalize_title(meta['title'])
                if (match_norm_title in norm_title or norm_title in match_norm_title) and (not year or not match_year or year == match_year):
                    result = {'url': scraper_utils.pathify_url(post_url), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year}
                    results.append(result)
        return results

    def __too_old(self, post):
        """Return True if *post*'s posting date is older than the filter window."""
        try:
            filter_days = datetime.timedelta(days=int(kodi.get_setting('%s-filter' % (self.get_name()))))
            if filter_days:
                today = datetime.date.today()
                match = re.search('<a[^>]+title="posting time[^"]*">(.*?)\s+(\d+)\s*(\d{2,4})<', post)
                mon_name, post_day, post_year = match.groups()
                post_year = int(post_year)
                if post_year < 2000: post_year += 2000
                post_month = SHORT_MONS.index(mon_name) + 1
                post_date = datetime.date(post_year, post_month, int(post_day))
                if today - post_date > filter_days:
                    return True
        except (AttributeError, ValueError):
            # BUG FIX: previously only ValueError was caught, so a post with
            # no "posting time" anchor made match.groups() raise an uncaught
            # AttributeError (match is None). Either failure now means
            # "cannot date this post" -> do not filter it out.
            return False
        return False
|
odicraig/kodi2odi
|
addons/plugin.video.salts/scrapers/2ddl_scraper.py
|
Python
|
gpl-3.0
| 8,193
|
[
"ADF"
] |
375408c81889bd5285a156b21aa50fc5a60558fa642483a70c29da71c98a2fa9
|
import random
import math
#################################################
# Parameters
#################################################
# All time constants below are in seconds; rates are in Hz.
dt = 0.001 # simulation time step
t_rc = 0.02 # membrane RC time constant
t_ref = 0.002 # refractory period
t_pstc = 0.1 # post-synaptic time constant
N_A = 50 # number of neurons in first population
N_B = 40 # number of neurons in second population
N_samples = 100 # number of sample points to use when finding decoders
rate_A = 25, 75 # range of maximum firing rates for population A
rate_B = 50, 100 # range of maximum firing rates for population B
# the input to the system over time
# NOTE: intentionally shadows the builtin input(); kept for the callers below
def input(t):
    return math.sin(t)
# the function to compute between A and B
def function(x):
    return x*x
#################################################
# Step 1: Initialization
#################################################
# create random encoders for the two populations
# (each neuron's preferred direction is randomly +1 or -1)
encoder_A = [random.choice([-1,1]) for i in range(N_A)]
encoder_B = [random.choice([-1,1]) for i in range(N_B)]
def generate_gain_and_bias(count, intercept_low, intercept_high, rate_low, rate_high):
    """Return (gain, bias) lists for *count* LIF neurons.

    Each neuron gets a random intercept (x value where it starts firing) in
    [intercept_low, intercept_high] and a random maximum rate in
    [rate_low, rate_high]; gain and bias are solved from the LIF rate
    equation so the tuning curve hits both targets.
    """
    gains = []
    biases = []
    for _ in range(count):
        intercept = random.uniform(intercept_low, intercept_high)
        max_rate = random.uniform(rate_low, rate_high)
        # Invert the LIF rate equation (specific to LIF neurons) to find
        # the gain/bias producing this intercept and maximum rate.
        z = 1.0 / (1-math.exp((t_ref-(1.0/max_rate))/t_rc))
        g = (1 - z)/(intercept - 1.0)
        gains.append(g)
        biases.append(1 - g*intercept)
    return gains, biases
# random gain and bias for the two populations
gain_A, bias_A = generate_gain_and_bias(N_A, -1, 1, rate_A[0], rate_A[1])
gain_B, bias_B = generate_gain_and_bias(N_B, -1, 1, rate_B[0], rate_B[1])
# a simple leaky integrate-and-fire model, scaled so that v=0 is resting
# voltage and v=1 is the firing threshold
def run_neurons(input, v, ref):
    """Advance each LIF neuron one time step dt; mutate v and ref in place.

    *input* is the injected current per neuron, *v* the membrane voltages
    (0 = rest, 1 = threshold), *ref* the remaining refractory time.
    Returns a list of booleans: True where the neuron spiked this step.
    """
    spikes = []
    for i, current in enumerate(input):
        # LIF membrane voltage update.
        v[i] += dt * (current - v[i]) / t_rc
        if v[i] < 0:
            v[i] = 0          # clamp voltage at rest
        if ref[i] > 0:
            # During the refractory period hold the voltage at zero.
            v[i] = 0
            ref[i] -= dt
        if v[i] > 1:
            # Threshold crossed: spike, reset, enter refractory period.
            spikes.append(True)
            v[i] = 0
            ref[i] = t_ref
        else:
            spikes.append(False)
    return spikes
# measure the spike rate of a whole population for a given represented value x
def compute_response(x, encoder, gain, bias, time_limit=0.5):
    """Estimate each neuron's firing rate (Hz) for represented value *x*.

    Drives the population with the constant current corresponding to *x*
    for *time_limit* seconds and counts spikes.
    """
    n = len(encoder)
    voltages = [0] * n
    refractory = [0] * n
    # Current each neuron receives for this x; randomize initial voltages
    # so the population does not spike in lockstep.
    currents = []
    for i in range(n):
        currents.append(x*encoder[i]*gain[i]+bias[i])
        voltages[i] = random.uniform(0, 1)
    spike_counts = [0] * n
    elapsed = 0
    while elapsed < time_limit:
        for i, fired in enumerate(run_neurons(currents, voltages, refractory)):
            if fired:
                spike_counts[i] += 1
        elapsed += dt
    # Convert counts to rates in Hz.
    return [c/time_limit for c in spike_counts]
# compute the tuning curves for a population
def compute_tuning_curves(encoder, gain, bias):
    """Sample the population's tuning curves over x in [-1, 1).

    Returns ``(x_values, A)`` where row A[i] is the population's firing
    rates at x_values[i].
    """
    # Evenly spaced sample points across the represented range.
    x_values = [i*2.0/N_samples - 1.0 for i in range(N_samples)]
    responses = [compute_response(x, encoder, gain, bias) for x in x_values]
    return x_values, responses
# compute decoders
import numpy
def compute_decoder(encoder, gain, bias, function=lambda x: x):
    """Solve for the optimal linear decoders of *function* over the population.

    Least-squares solution via the pseudo-inverse of the Gram matrix of the
    tuning curves; the 1/dt scaling matches the spike-based decoding used in
    the simulation loop.
    """
    x_values, activity = compute_tuning_curves(encoder, gain, bias)
    # Desired decoded value at each sample point (column vector).
    targets = numpy.array([[function(x)] for x in x_values])
    activity = numpy.array(activity).T
    gram = numpy.dot(activity, activity.T)
    upsilon = numpy.dot(activity, targets)
    gram_inv = numpy.linalg.pinv(gram)
    return numpy.dot(gram_inv, upsilon)/dt
# find the decoders for A and B
# (decoder_A bakes `function` in; decoder_B decodes the identity)
decoder_A=compute_decoder(encoder_A, gain_A, bias_A, function=function)
decoder_B=compute_decoder(encoder_B, gain_B, bias_B)
# compute the weight matrix
# weights[i][j] is the synaptic weight from neuron i in A to neuron j in B
weights=numpy.dot(decoder_A, [encoder_B])
#################################################
# Step 2: Running the simulation
#################################################
v_A = [0.0]*N_A # voltage for population A
ref_A = [0.0]*N_A # refractory period for population A
input_A = [0.0]*N_A # input for population A
v_B = [0.0]*N_B # voltage for population B
ref_B = [0.0]*N_B # refractory period for population B
input_B = [0.0]*N_B # input for population B
# scaling factor for the post-synaptic filter (exponential decay per step)
pstc_scale=1.0-math.exp(-dt/t_pstc)
# for storing simulation data to plot afterward
inputs=[]
times=[]
outputs=[]
ideal=[]
output=0.0 # the decoded output value from population B
t=0
# Main simulation loop: 10 seconds of simulated time in steps of dt.
while t<10.0:
    # call the input function to determine the input value
    x=input(t)
    # convert the input value into an input for each neuron
    for i in range(N_A):
        input_A[i]=x*encoder_A[i]*gain_A[i]+bias_A[i]
    # run population A and determine which neurons spike
    spikes_A=run_neurons(input_A, v_A, ref_A)
    # decay all of the inputs (implementing the post-synaptic filter)
    for j in range(N_B):
        input_B[j]*=(1.0-pstc_scale)
    # for each neuron that spikes, increase the input current
    # of all the neurons it is connected to by the synaptic
    # connection weight
    for i,s in enumerate(spikes_A):
        if s:
            for j in range(N_B):
                input_B[j]+=weights[i][j]*pstc_scale
    # compute the total input into each neuron in population B
    # (taking into account gain and bias)
    total_B=[0]*N_B
    for j in range(N_B):
        total_B[j]=gain_B[j]*input_B[j]+bias_B[j]
    # run population B and determine which neurons spike
    spikes_B=run_neurons(total_B, v_B, ref_B)
    # for each neuron in B that spikes, update our decoded value
    # (also applying the same post-synaptic filter)
    output*=(1.0-pstc_scale)
    for j,s in enumerate(spikes_B):
        if s:
            output+=decoder_B[j][0]*pstc_scale
    # progress output (Python 2 print statement)
    print t, output
    # record this step for plotting afterward
    times.append(t)
    inputs.append(x)
    outputs.append(output)
    ideal.append(function(x))
    t+=dt
#################################################
# Step 3: Plot the results
#################################################
# Re-sample the tuning curves for display.
x,A = compute_tuning_curves(encoder_A, gain_A, bias_A)
x,B = compute_tuning_curves(encoder_B, gain_B, bias_B)
import pylab
pylab.figure()
pylab.plot(x, A)
pylab.title('Tuning curves for population A')
pylab.figure()
pylab.plot(x, B)
pylab.title('Tuning curves for population B')
# Decoded output against the input signal and the ideal function(x).
pylab.figure()
pylab.plot(times, inputs, label='input')
pylab.plot(times, ideal, label='ideal')
pylab.plot(times, outputs, label='output')
pylab.title('Simulation results')
pylab.legend()
pylab.show()
|
harshkothari410/snn-image-segmentation
|
nef.py
|
Python
|
mit
| 7,869
|
[
"NEURON"
] |
c553b9b328d36854e0b1c52016cc80ab9cc14c06ad325ad3e22740e30d0d5b24
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from pyspark.ml.linalg import DenseVector
from bigdl.dllib.feature.common import ChainedPreprocessing, FeatureSet
from bigdl.dllib.feature.image import *
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.dllib.keras.optimizers import Adam
from bigdl.orca.tfpark import TFNet, TFOptimizer
import tensorflow as tf
import numpy as np
import os
from bigdl.orca.tfpark import KerasModel, TFDataset
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
def single_parse_fn(e):
    """Decode one serialized tf.Example into (image, label) tensors.

    Images are raw 28x28x1 uint8 (MNIST-shaped), labels scalar int64,
    decoded with the tf.contrib.slim TFExampleDecoder.
    """
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
        'image/class/label': tf.FixedLenFeature(
            [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
    }
    items_to_handlers = {
        'image': tf.contrib.slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
        'label': tf.contrib.slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
    }
    decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
        keys_to_features, items_to_handlers)
    results = decoder.decode(e)
    # Identify which decoded tensor is the image by its non-scalar shape,
    # since the decoder's output order is not relied upon here.
    if len(results[0].shape) > 0:
        feature = results[0]
        label = results[1]
    else:
        feature = results[1]
        label = results[0]
    return feature, label
def parse_fn(example):
    """Batched wrapper over single_parse_fn; returns (float images, int64 labels)."""
    results = tf.map_fn(single_parse_fn, example, dtype=(tf.uint8, tf.int64))
    return tf.to_float(results[0]), results[1]
class TestTFDataset(ZooTestCase):
    def get_raw_image_set(self, with_label):
        """Read the cat_dog test resources into a (possibly labeled) ImageSet."""
        resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
        if with_label:
            image_folder = os.path.join(resource_path, "cat_dog")
        else:
            # Glob form reads the images without label subfolder semantics.
            image_folder = os.path.join(resource_path, "cat_dog/*")
        from bigdl.dllib.feature.image import ImageSet
        image_set = ImageSet.read(image_folder, with_label=with_label, sc=get_spark_context(),
                                  one_based_label=False)
        return image_set
    def setup_method(self, method):
        """Clear Keras/TF graph state before each test, then defer to the base class."""
        tf.keras.backend.clear_session()
        super(TestTFDataset, self).setup_method(method)
    def create_model(self):
        """Build a tiny compiled Keras classifier over 10-d inputs (2 classes)."""
        data = tf.keras.layers.Input(shape=[10])
        x = tf.keras.layers.Flatten()(data)
        x = tf.keras.layers.Dense(10, activation='relu')(x)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(x)
        model = tf.keras.models.Model(inputs=data, outputs=predictions)
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model
    def create_training_dataset(self):
        """TFDataset of 20 fixed-seed random (x, y) pairs, batch_size=4,
        with the same RDD reused for validation."""
        np.random.seed(20)
        x = np.random.rand(20, 10)
        y = np.random.randint(0, 2, (20))
        rdd_x = self.sc.parallelize(x)
        rdd_y = self.sc.parallelize(y)
        rdd = rdd_x.zip(rdd_y)
        dataset = TFDataset.from_rdd(rdd,
                                     features=(tf.float32, [10]),
                                     labels=(tf.int32, []),
                                     batch_size=4,
                                     val_rdd=rdd
                                     )
        return dataset
    def test_dataset_without_batch(self):
        """fit/evaluate/predict must raise when the TFDataset lacks the
        corresponding batch sizing (batch_size / batch_per_thread)."""
        x = np.random.rand(20, 10)
        y = np.random.randint(0, 2, (20))
        rdd_x = self.sc.parallelize(x)
        rdd_y = self.sc.parallelize(y)
        rdd = rdd_x.zip(rdd_y)
        dataset = TFDataset.from_rdd(rdd,
                                     features=(tf.float32, [10]),
                                     labels=(tf.int32, []),
                                     names=["features", "labels"],
                                     val_rdd=rdd
                                     )
        keras_model = self.create_model()
        model = KerasModel(keras_model)
        # No batch_size -> fit must reject the dataset.
        self.intercept(lambda: model.fit(dataset),
                       "The batch_size of TFDataset must be" +
                       " specified when used in KerasModel fit.")
        dataset = TFDataset.from_rdd(rdd,
                                     features=(tf.float32, [10]),
                                     labels=(tf.int32, []),
                                     names=["features", "labels"],
                                     )
        # No batch_per_thread -> evaluate must reject the dataset.
        self.intercept(lambda: model.evaluate(dataset),
                       "The batch_per_thread of TFDataset must be " +
                       "specified when used in KerasModel evaluate.")
        dataset = TFDataset.from_rdd(rdd_x,
                                     features=(tf.float32, [10]),
                                     names=["features", "labels"],
                                     )
        # No batch_per_thread -> predict must reject the dataset.
        self.intercept(lambda: model.predict(dataset),
                       "The batch_per_thread of TFDataset must be" +
                       " specified when used in KerasModel predict.")
    def create_image_model(self):
        """Build a compiled Keras classifier over 224x224x3 images (10 classes)."""
        data = tf.keras.layers.Input(shape=[224, 224, 3])
        x = tf.keras.layers.Flatten()(data)
        predictions = tf.keras.layers.Dense(10, activation='softmax')(x)
        model = tf.keras.models.Model(inputs=data, outputs=predictions)
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return KerasModel(model)
    def create_image_set(self, with_label):
        """Read the raw image set and preprocess to 224x224 NHWC samples."""
        image_set = self.get_raw_image_set(with_label)
        transformer = ChainedPreprocessing([ImageResize(256, 256),
                                            ImageRandomCrop(224, 224, True),
                                            ImageMatToTensor(format="NHWC"),
                                            ImageSetToSample(input_keys=["imageTensor"],
                                                             target_keys=["label"]
                                                             if with_label else None)])
        image_set = image_set.transform(transformer)
        return image_set
    def create_train_features_Set(self):
        """Build a labeled FeatureSet with a full training augmentation
        pipeline (resize, random crop/flip, ImageNet-style normalization)."""
        image_set = self.get_raw_image_set(with_label=True)
        feature_set = FeatureSet.image_frame(image_set.to_image_frame())
        train_transformer = ChainedPreprocessing([ImageBytesToMat(),
                                                  ImageResize(256, 256),
                                                  ImageRandomCrop(224, 224),
                                                  ImageRandomPreprocessing(ImageHFlip(), 0.5),
                                                  ImageChannelNormalize(
                                                      0.485, 0.456, 0.406,
                                                      0.229, 0.224, 0.225),
                                                  ImageMatToTensor(to_RGB=True, format="NHWC"),
                                                  ImageSetToSample(input_keys=["imageTensor"],
                                                                   target_keys=["label"])
                                                  ])
        feature_set = feature_set.transform(train_transformer)
        feature_set = feature_set.transform(ImageFeatureToSample())
        return feature_set
    def test_training_for_imageset(self):
        """Smoke test: fit on a TFDataset built from an ImageSet."""
        model = self.create_image_model()
        image_set = self.create_image_set(with_label=True)
        training_dataset = TFDataset.from_image_set(image_set,
                                                    image=(tf.float32, [224, 224, 3]),
                                                    label=(tf.int32, [1]),
                                                    batch_size=4)
        model.fit(training_dataset)
    def test_training_for_feature_set(self):
        """Smoke test: fit on a TFDataset built from a FeatureSet."""
        model = self.create_image_model()
        feature_set = self.create_train_features_Set()
        training_dataset = TFDataset.from_feature_set(feature_set,
                                                      features=(tf.float32, [224, 224, 3]),
                                                      labels=(tf.int32, [1]),
                                                      batch_size=8)
        model.fit(training_dataset)
    def test_evaluation_for_imageset(self):
        """Smoke test: evaluate on a labeled ImageSet-backed TFDataset."""
        model = self.create_image_model()
        image_set = self.create_image_set(with_label=True)
        eval_dataset = TFDataset.from_image_set(image_set,
                                                image=(tf.float32, [224, 224, 3]),
                                                label=(tf.int32, [1]),
                                                batch_per_thread=1)
        model.evaluate(eval_dataset)
    def test_predict_for_imageset(self):
        """Predict on an unlabeled ImageSet and check every row has a prediction."""
        model = self.create_image_model()
        image_set = self.create_image_set(with_label=False)
        predict_dataset = TFDataset.from_image_set(image_set,
                                                   image=(tf.float32, [224, 224, 3]),
                                                   batch_per_thread=1)
        results = model.predict(predict_dataset).get_predict().collect()
        assert all(r[1] is not None for r in results)
def test_gradient_clipping(self):
    """With clipvalue=1e-8 an SGD update must barely move the weights.

    Bug fix: the original computed ``np.all(...)`` but discarded the
    result, so the check could never fail; it is now asserted.
    """
    data = tf.keras.layers.Input(shape=[10])
    x = tf.keras.layers.Flatten()(data)
    x = tf.keras.layers.Dense(10, activation='relu')(x)
    predictions = tf.keras.layers.Dense(2, activation='softmax')(x)
    model = tf.keras.models.Model(inputs=data, outputs=predictions)
    model.compile(optimizer=tf.keras.optimizers.SGD(lr=1, clipvalue=1e-8),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model = KerasModel(model)
    pre_weights = model.get_weights()
    dataset = self.create_training_dataset()
    # 5 iterations
    model.fit(dataset)
    current_weight = model.get_weights()
    # Each step moves a weight by at most lr * clipvalue == 1e-8, so a
    # handful of steps must stay below the 1e-7 bound.
    assert np.all(np.abs(current_weight[0] - pre_weights[0]) < 1e-7)
def test_tf_dataset_with_list_feature(self):
    """Feature tensors built from a list of feature specs must be named
    list_input_<idx>."""
    np.random.seed(20)
    feature_arr = np.random.rand(20, 10)
    label_arr = np.random.randint(0, 2, (20))
    paired = self.sc.parallelize(feature_arr).zip(self.sc.parallelize(label_arr))
    dataset = TFDataset.from_rdd(paired,
                                 features=[(tf.float32, [10]), (tf.float32, [10])],
                                 labels=(tf.int32, []),
                                 batch_size=4,
                                 val_rdd=paired
                                 )
    for position, tensor in enumerate(dataset.feature_tensors):
        assert tensor.name == "list_input_" + str(position) + ":0"
def test_tfdataset_with_string_rdd(self):
    """Strings fed through a TFDataset must reach the graph intact."""
    ds = TFDataset.from_string_rdd(self.sc.parallelize(["123", "456"], 1),
                                   batch_per_thread=1)
    input_tensor = tf.placeholder(dtype=tf.string, shape=(None,))
    output_tensor = tf.string_to_number(input_tensor)
    with tf.Session() as sess:
        tfnet = TFNet.from_session(sess, inputs=[input_tensor], outputs=[output_tensor])
    predictions = tfnet.predict(ds).collect()
    assert predictions[0] == 123
    assert predictions[1] == 456
def test_tfdataset_with_tfrecord(self):
    """Train a small dense model directly on mnist TFRecord files."""
    train_path = os.path.join(resource_path, "tfrecord/mnist_train.tfrecord")
    test_path = os.path.join(resource_path, "tfrecord/mnist_test.tfrecord")
    dataset = TFDataset.from_tfrecord_file(self.sc, train_path,
                                           batch_size=16,
                                           validation_file_path=test_path)
    # The dataset yields raw serialized examples; decode then classify.
    images, labels = parse_fn(dataset.tensors[0])
    logits = tf.layers.dense(tf.layers.flatten(images), 10)
    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    TFOptimizer.from_loss(loss, Adam()).optimize()
def test_tfdataset_with_tf_data_dataset_which_requires_table(self):
    """A tf.data pipeline that needs a lookup table must still train."""
    keys = [1, 0, -1]
    raw = tf.data.Dataset.from_tensor_slices([1, 2, -1, 5] * 40)
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys, values=list(reversed(keys))),
        default_value=100)
    # Map through the table, then attach a constant label of 1.
    mapped = raw.map(table.lookup).map(lambda x: (tf.to_float(x), 1))
    dataset = TFDataset.from_tf_data_dataset(mapped, batch_size=16)
    seq = tf.keras.Sequential(
        [tf.keras.layers.Flatten(input_shape=()),
         tf.keras.layers.Dense(10, activation="softmax")])
    seq.compile(optimizer=tf.keras.optimizers.RMSprop(),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    KerasModel(seq).fit(dataset)
def test_tfdataset_with_tf_data_dataset_which_contains_bool(self):
    """A tf.data pipeline containing a bool (mask) tensor must train.

    Fix: use the builtin ``bool`` dtype instead of ``np.bool``, which was
    deprecated in NumPy 1.20 and removed in NumPy 1.24.
    """
    dataset = tf.data.Dataset.from_tensor_slices((np.random.randn(102, 28, 28, 1),
                                                  np.random.randint(0, 10, size=(102,)),
                                                  np.ones(shape=(102, 28, 28, 1),
                                                          dtype=bool)))
    dataset = TFDataset.from_tf_data_dataset(dataset, batch_size=16)
    feature, labels, mask = dataset.tensors
    # Zero out masked-off pixels before classification.
    float_mask = tf.to_float(mask)
    masked_feature = tf.to_float(feature) * float_mask
    flatten = tf.layers.flatten(masked_feature)
    logits = tf.layers.dense(flatten, 10)
    loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits,
                                                                 labels=labels))
    opt = TFOptimizer.from_loss(loss, Adam())
    opt.optimize()
def test_tfdataset_with_tf_data_dataset(self):
    """End-to-end fit + evaluate on plain tf.data datasets."""
    def make_tf_dataset():
        # Fresh random images and labels, cast to float for the model.
        ds = tf.data.Dataset.from_tensor_slices(
            (np.random.randn(102, 28, 28, 1),
             np.random.randint(0, 10, size=(102,))))
        return ds.map(lambda feature, label: (tf.to_float(feature), label))
    train_ds = TFDataset.from_tf_data_dataset(make_tf_dataset(), batch_size=16)
    seq = tf.keras.Sequential(
        [tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
         tf.keras.layers.Dense(10, activation="softmax")])
    seq.compile(optimizer=tf.keras.optimizers.RMSprop(),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    model = KerasModel(seq)
    model.fit(train_ds)
    eval_ds = TFDataset.from_tf_data_dataset(make_tf_dataset(), batch_per_thread=16)
    model.evaluate(eval_ds)
def check_dataset(self, create_ds):
    """Fit, predict and evaluate a tiny dense model on the datasets
    produced by the given ``create_ds(mode)`` factory."""
    seq = tf.keras.Sequential(
        [tf.keras.layers.Flatten(input_shape=(20,)),
         tf.keras.layers.Dense(10, activation="softmax")])
    seq.compile(optimizer=tf.keras.optimizers.RMSprop(),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    model = KerasModel(seq)
    for mode in ("train", "predict", "evaluate"):
        ds = create_ds(mode)
        if mode == "train":
            model.fit(ds)
        elif mode == "predict":
            model.predict(ds).collect()
        else:
            model.evaluate(ds)
def make_create_ds_fn(self, train_df, val_df):
    """Return a factory that maps a mode string ("train"/"predict"/
    "evaluate") to the corresponding TFDataset built from dataframes."""
    def create_ds(mode):
        # Guard-clause style: return as soon as the mode matches.
        if mode == "train":
            return TFDataset.from_dataframe(train_df,
                                            feature_cols=["feature"],
                                            labels_cols=["label"],
                                            batch_size=32,
                                            validation_df=val_df)
        if mode == "predict":
            return TFDataset.from_dataframe(val_df,
                                            feature_cols=["feature"],
                                            batch_per_thread=32)
        if mode == "evaluate":
            return TFDataset.from_dataframe(val_df,
                                            feature_cols=["feature"],
                                            labels_cols=["label"],
                                            batch_per_thread=32)
        raise ValueError("unrecognized mode: {}".format(mode))
    return create_ds
def test_tfdataset_with_dataframe(self):
    """TFDataset.from_dataframe with a DenseVector feature column.

    Fix: use the builtin ``float`` instead of ``np.float``, which was
    deprecated in NumPy 1.20 and removed in NumPy 1.24.
    """
    rdd = self.sc.range(0, 1000)
    df = rdd.map(lambda x: (DenseVector(np.random.rand(20).astype(float)),
                            x % 10)).toDF(["feature", "label"])
    train_df, val_df = df.randomSplit([0.7, 0.3])
    create_ds = self.make_create_ds_fn(train_df, val_df)
    self.check_dataset(create_ds)
def test_tfdataset_with_dataframe_arraytype(self):
    """TFDataset.from_dataframe with an ArrayType feature column."""
    frame = self.sc.range(0, 1000) \
        .map(lambda x: ([0.0] * 20, x % 10)) \
        .toDF(["feature", "label"])
    train_df, val_df = frame.randomSplit([0.7, 0.3])
    self.check_dataset(self.make_create_ds_fn(train_df, val_df))
if __name__ == "__main__":
    # Run this module's tests directly under pytest.
    pytest.main([__file__])
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/tfpark/test_tf_dataset.py
|
Python
|
apache-2.0
| 18,397
|
[
"ORCA"
] |
56334d178a62f2630655ed98f2a1f3424ec3da60b6552a7d55e425855240cdb7
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger
# Read the test Exodus mesh and render it as a wireframe.
reader = chigger.exodus.ExodusReader('../../input/variable_range.e')
result = chigger.exodus.ExodusResult(reader, representation='wireframe')
# Overlay per-cell labels (blue text) on top of the wireframe.
cell_result = chigger.exodus.LabelExodusResult(result, label_type='cell', text_color=[0,0,1], font_size=12)
window = chigger.RenderWindow(result, cell_result, size=[300,300], test=True)
# Write the rendered frame for the regression test, then start the window.
window.write('cells.png')
window.start()
|
liuwenf/moose
|
python/chigger/tests/exodus/labels/cells.py
|
Python
|
lgpl-2.1
| 1,299
|
[
"MOOSE"
] |
8201a84ce69ebea2216f80c9b2f9e92fbd5340b2d449efb8f4652ef1e5ec0b04
|
import json
import re
current_index = 0
def new_item_stack(input):
    """Normalize an item-stack spec to a 3-element [name, count, data] list.

    Accepts either a bare item-name string (count defaults to 1, data
    to 0) or a list of 1-3 elements, which is padded in place with the
    defaults.  Returns None for any other type, as before.

    Bug fix: the string branch used to return a *tuple*, which breaks
    callers that mutate the result (e.g. ``list.insert`` in
    new_item_stacks); it now returns a list like the list branch.
    Also uses isinstance() instead of comparing type objects.
    """
    if isinstance(input, str):
        return [input, 1, 0]
    elif isinstance(input, list):
        if len(input) == 1:
            input.append(1)
        if len(input) == 2:
            input.append(0)
        return input
def new_item_stacks(input, shapeless=False):
    """Normalize a list of item-stack specs.  For shaped recipes each
    spec starts with its pattern character, which is popped off before
    normalization and re-inserted at the front afterwards."""
    normalized = []
    for spec in input:
        if shapeless:
            stack = new_item_stack(spec)
        else:
            marker = spec.pop(0)
            stack = new_item_stack(spec)
            stack.insert(0, marker)
        normalized.append(stack)
    return normalized
def addShapedRecipe(output, pattern, input):
    """Write a shaped crafting recipe as a JSON file in the current dir.

    output  -- item-stack spec for the result (see new_item_stack)
    pattern -- list of pattern-row strings, e.g. ['###', '#X#', '###']
    input   -- list of [pattern_char, item, count, data] specs

    Fix: the file handle is now closed deterministically via ``with``
    instead of being leaked by passing an anonymous open() to json.dump.
    """
    global current_index
    output = new_item_stack(output)
    input = new_item_stacks(input)
    keys = {}
    for item_stack in input:
        keys[item_stack[0]] = {
            'item': item_stack[1],
            'count': item_stack[2],
            'data': item_stack[3]
        }
    # File name: <item-without-namespace>_<count>_<data>_<index>.json
    short_name = re.compile('^.*?:(.*)$').search(output[0]).group(1)
    filename = '{}_{}_{}_{}.json'.format(short_name, output[1], output[2], current_index)
    with open(filename, 'w') as recipe_file:
        json.dump({
            'type': 'minecraft:crafting_shaped',
            'pattern': pattern,
            'key': keys,
            'result': {
                'item': output[0],
                'count': output[1],
                'data': output[2]
            }
        }, recipe_file)
    current_index += 1
def addShapelessRecipe(output, input):
    """Write a shapeless crafting recipe as a JSON file in the current dir.

    output -- item-stack spec for the result (see new_item_stack)
    input  -- list of [item, count, data] ingredient specs

    Fix: the file handle is now closed deterministically via ``with``
    instead of being leaked by passing an anonymous open() to json.dump.
    """
    global current_index
    output = new_item_stack(output)
    input = new_item_stacks(input, True)
    ingredients = []
    for item_stack in input:
        ingredients.append({
            'item': item_stack[0],
            'count': item_stack[1],
            'data': item_stack[2]
        })
    # File name: <item-without-namespace>_<count>_<data>_<index>.json
    short_name = re.compile('^.*?:(.*)$').search(output[0]).group(1)
    filename = '{}_{}_{}_{}.json'.format(short_name, output[1], output[2], current_index)
    with open(filename, 'w') as recipe_file:
        json.dump({
            'type': 'minecraft:crafting_shapeless',
            'ingredients': ingredients,
            'result': {
                'item': output[0],
                'count': output[1],
                'data': output[2]
            }
        }, recipe_file)
    current_index += 1
# The 16 dyed crystal-block variants, indexed by vanilla dye metadata.
crystal_blocks_colored = ['crystalblocks:crystal_block_black', 'crystalblocks:crystal_block_red', 'crystalblocks:crystal_block_green', 'crystalblocks:crystal_block_brown', 'crystalblocks:crystal_block_blue', 'crystalblocks:crystal_block_purple', 'crystalblocks:crystal_block_cyan', 'crystalblocks:crystal_block_light_gray', 'crystalblocks:crystal_block_gray', 'crystalblocks:crystal_block_pink', 'crystalblocks:crystal_block_lime', 'crystalblocks:crystal_block_yellow', 'crystalblocks:crystal_block_light_blue', 'crystalblocks:crystal_block_magenta', 'crystalblocks:crystal_block_orange', 'crystalblocks:crystal_block_white']
# Block <-> item conversions: 9 crystals = 1 block and back.
addShapedRecipe(['crystalblocks:crystal_block'], ['###', '###', '###'], [['#', 'crystalblocks:crystal']])
addShapedRecipe(['crystalblocks:crystal', 9], ['#'], [['#', 'crystalblocks:crystal_block']])
for i in range(16):
    # Dyeing: surround 8 blocks/crystals with one dye of metadata i.
    addShapedRecipe([crystal_blocks_colored[i], 8], ['###', '#X#', '###'], [['#', 'crystalblocks:crystal_block'], ['X', 'minecraft:dye', 1, i]])
    addShapedRecipe(['crystalblocks:crystal_colored', 8, i], ['###', '#X#', '###'], [['#', 'crystalblocks:crystal'], ['X', 'minecraft:dye', 1, i]])
    # Colored block <-> colored crystal conversions.
    addShapedRecipe([crystal_blocks_colored[i]], ['###', '###', '###'], [['#', 'crystalblocks:crystal_colored', 1, i]])
    addShapedRecipe(['crystalblocks:crystal_colored', 9, i], ['#'], [['#', crystal_blocks_colored[i], 1, 0]])
    if i < 15:
        # Glowstone upgrade: bump the block's metadata by one.
        # NOTE(review): here i appears to be block metadata (brightness?),
        # not a dye index — confirm against the mod's block definitions.
        addShapelessRecipe(['crystalblocks:crystal_block', 1, i + 1], [['crystalblocks:crystal_block', 1, i], ['minecraft:glowstone_dust']])
        for j in range(16):
            addShapelessRecipe([crystal_blocks_colored[j], 1, i + 1], [[crystal_blocks_colored[j], 1, i], ['minecraft:glowstone_dust']])
|
19Spleen/CrystalBlocks
|
generate_recipes.py
|
Python
|
gpl-3.0
| 3,803
|
[
"CRYSTAL"
] |
fbb07b99049cb568ffe91ef4c239c361662f4236fd2350fb4172d35ba7ee9a21
|
#!/usr/bin/env python
# Take the json in the file given as first argument and convert it to the JSON
# format needed for import. Should do all cleanup of data and removal of
# unneeded entries too.
import sys
import os
import json
import re
import urllib
script_dir = os.path.basename(__file__)
base_dir = os.path.join(script_dir, "../../../../..")
app_path = os.path.abspath(base_dir)
sys.path.append(app_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pombola.settings.south_africa'
from django.utils.text import slugify
from django.db.models import Q
from pombola.core.models import Person
class Converter(object):
groupings = []
ditto_marks = [
"\"",
"\" \"",
]
# Change this to True to enable little bits of helper code for finding new
# slug corrections:
finding_slug_corrections = False
slug_corrections = {
"albert-theo-fritz": "albert-fritz",
"albertinah-nomathuli-luthuli": "a-n-luthuli",
"amos-matila": "amos-gerald-matila",
"andre-gaum": "andre-hurtley-gaum",
"andrew-louw": "a-louw",
"anele-mda": "a-mda",
"anton-alberts": "anton-de-waal-alberts",
"archibold-mzuvukile-figlan": "a-m-figlan",
"archibold-nyambi": "archibold-jomo-nyambi",
"arthur-ainslie": "arthur-roy-ainslie",
"bafunani-aaron-mnguni": "bafumani-aaron-mnguni",
"barbara-anne-hogan": "b-a-hogan",
"barbara-thompson": "barbara-thomson",
"bertha-mabe": "bertha-peace-mabe",
"beryl-ferguson": "beryl-delores-ferguson",
"beverley-lynnette-abrahams": "beverley-lynette-abrahams",
"bhekizizwe-abram-radebe": "bhekiziswe-abram-radebe",
"bonginkosi-dhlamini": "bonginkosi-dlamini",
"bonisile-alfred-nesi": "bonisile-nesi",
"busisiwe-mncube": "busisiwe-veronica-mncube",
"butana-moses-komphela": "b-m-komphela",
"buyelwa-patience-sonjica": "b-p-sonjica",
"buyiswa-diemu": "buyiswa-cornelia-diemu",
"buyiswa-diemublaai": "buyiswa-cornelia-diemu",
"cassel-mathale": "cassel-charlie-mathale",
"charel-de-beer": "charel-jacobus-de-beer",
"constance-mosimane": "constance-kedibone-kelekegile-mosimane",
"cq-madlopha": "celiwe-qhamkile-madlopha",
"crosby-mpozo-moni": "crosby-mpoxo-moni",
"dalitha-boshigo": "dalitha-fiki-boshigo",
"danny-montsitsi": "sediane-danny-montsitsi",
"dennis-bloem": "dennis-victor-bloem",
"dennis-gamede": "dumisani-dennis-gamede",
"desiree-van-der-walt": "d-van-der-walt",
"dina-deliwa-pule": "dina-deliwe-pule",
"dirk-feldman": "dirk-benjamin-feldman",
"dj-stubbe": "dirk-jan-stubbe",
"doris-nompendlko-ngcengwane": "nompendlko-doris-ngcengwane",
"dudu-chili": "dudu-olive-chili",
"duduzile-sibiya": "dudu-sibiya",
"dumisani-ximbi": "dumsani-livingstone-ximbi",
"ebrahim-ebrahim": "ebrahim-ismail-ebrahim",
"elza-van-lingen": "elizabeth-christina-van-lingen",
"emmanuel-nkosinathi-mandlenkosi-mthethwa": "emmanuel-nkosinathi-mthethwa",
"enoch-godongwana": "e-godongwana",
"ernst-eloff": "ernst-hendrik-eloff",
"faith-bikani": "faith-claudine-bikani",
"gbd-mcintosh": "graham-brian-douglas-mc-intosh",
"gelana-sindane": "gelana-sarian-sindane",
"geoffery-quinton-mitchell-doidge": "g-q-m-doidge",
"geolatlhe-godfrey-oliphant": "gaolatlhe-godfrey-oliphant",
"geordin-hill-lewis": "geordin-gwyn-hill-lewis",
"george-boinamo": "george-gaolatlhe-boinamo",
"gloria-borman": "gloria-mary-borman",
"graham-peter-dalziel-mackenzie": "graham-peter-dalziel-mac-kenzie",
"gratitude-bulelani-magwanishe": "gratitude-magwanishe",
"gregory-krumbock": "gregory-rudy-krumbock",
"gwedoline-lindiwe-mahlangu-nkabinde": "g-l-mahlangu-nkabinde",
"helen-line": "helen-line-hendriks",
"hendrietta-bogopane-zulu": "hendrietta-ipeleng-bogopane-zulu",
"herman-groenewald": "hermanus-bernadus-groenewald",
"hildah-sizakele-msweli": "hilda-sizakele-msweli",
"isaac-mfundisi": "isaac-sipho-mfundisi",
"ismail-vadi": "i-vadi",
"jac-bekker": "jacobus-marthinus-g-bekker",
"james-lorimer": "james-robert-bourne-lorimer",
"jan-gunda": "jan-johannes-gunda",
"jf-smalle": "jacobus-frederik-smalle",
"johanna-fredrika-terblanche": "johanna-fredrika-juanita-terblanche",
"john-moepeng": "john-kabelo-moepeng",
"joseph-job-mc-gluwa": "joseph-job-mcgluwa",
"keith-muntuwenkosi-zondi": "k-m-zondi",
"kenneth-raselabe-meshoe": "kenneth-raselabe-joseph-meshoe",
"kennett-andrew-sinclair": "kenneth-andrew-sinclair",
"lekaba-jack-tolo": "l-j-tolo",
"lemias-buoang-mashile": "buoang-lemias-mashile",
"leonard-ramatlakana": "leonard-ramatlakane",
"liezl-van-der-merwe": "liezl-linda-van-der-merwe",
"lulama-mary-theresa-xingwana": "lulama-marytheresa-xingwana",
"lusizo-makhubela-mashele": "lusizo-sharon-makhubela-mashele",
"lydia-sindiswe-chikunga": "lydia-sindisiwe-chikunga",
"machejane-alina-rantsolase": "m-a-rantsolase",
"mafemane-makhubela": "mafemane-wilson-makhubela",
"maite-emely-nkoana-mashabane": "maite-emily-nkoana-mashabane",
"makgathatso-pilane-majake": "makgathatso-charlotte-chana-pilane-majake",
"makhenkezi-arnold-stofile": "m-a-stofile",
"makone-collen-maine": "mokoane-collen-maine",
"mandlenkosi-enock-mbili": "m-e-mbili",
"mark-harvey-steele": "m-h-steele",
"mary-anne-lindelwa-dunjwa": "mary-ann-lindelwa-dunjwa",
"masefako-dikgale": "masefako-clarah-digkale",
"matome-mokgobi": "matome-humphrey-mokgobi",
"mavis-nontsikelelo-magazi": "n-m-magazi",
"mavis-ntebaleng-matladi": "m-n-matladi",
"max-vuyisile-sisuslu": "max-vuyisile-sisulu",
"mbhazima-samuel-shilowa": "m-s-shilowa",
"mbuyiselo-jacobs": "mbuyiselo-patrick-jacobs",
"membathisi-mphumzi-shepherd-mdladlana": "m-m-s-mdladlana",
"meriam-phaliso": "meriam-nozibonelo-phaliso",
"michael-de-villiers": "michael-jacobs-roland-de-villiers",
"michael-james-ellis": "m-j-ellis",
"mmatlala-boroto": "mmatlala-grace-boroto",
"mninwa-mahlangu": "mninwa-johannes-mahlangu",
"mntombizodwa-florence-nyanda": "n-f-nyanda",
"mogi-lydia-moshodi": "moji-lydia-moshodi",
"mohammed-sayedali-shah": "mohammed-rafeek-sayedali-shah",
"mondli-gungubele": "m-gungubele",
"mosie-anthony-cele": "mosie-antony-cele",
"mpane-mohorosi": "mpane-martha-mohorosi",
"n-d-ntwanambi": "nosipho-dorothy-ntwanambi",
"nolitha-yvonne-vukuza-linda": "n-y-vukuza-linda",
"noluthando-agatha-mayende-sibiya": "n-a-mayende-sibiya",
"nomzamo-winnie-madikizela-mandela": "nomzamo-winfred-madikizela-mandela",
"nonkumbi-bertha-gxowa": "n-b-gxowa",
"ntombikhayise-nomawisile-sibhida": "ntombikayise-nomawisile-sibhida",
"ntombikhayise-nomawisile-sibhidla": "ntombikayise-nomawisile-sibhida",
"obed-bapela": "kopeng-obed-bapela",
"onel-de-beer": "onell-de-beer",
"pakishe-motsoaledi": "pakishe-aaron-motsoaledi",
"patrick-chauke": "h-p-chauke",
"paul-mashatile": "shipokasa-paulus-mashatile",
"pearl-petersen-maduna": "pearl-maduna",
"petronella-catharine-duncan": "petronella-catherine-duncan",
"petrus-johannes-christiaan-pretorius": "p-j-c-pretorius",
"phillip-david-dexter": "p-d-dexter",
"rachel-rasmeni": "rachel-nomonde-rasmeni",
"radhakrishna-lutchmana-padayachie": "r-l-padayachie",
"raseriti-tau": "raseriti-johannes-tau",
"rebecca-m-motsepe": "rebecca-mmakosha-motsepe",
"refilwe-junior-mashigo": "refilwe-modikanalo-mashigo",
"regina-lesoma": "regina-mina-mpontseng-lesoma",
"rejoice-thizwilondi-mabudafhasi": "thizwilondi-rejoyce-mabudafhasi",
"richard-baloyi": "masenyani-richard-baloyi",
"robert-lees": "robert-alfred-lees",
"roland-athol-trollip": "roland-athol-price-trollip",
"royith-bhoola": "royith-baloo-bhoola",
"salamuddi-abram": "salamuddi-salam-abram",
"sam-mazosiwe": "siphiwo-sam-mazosiwe",
"sanna-keikantseeng-molao-now-plaatjie": "sanna-keikantseeng-plaatjie",
"sanna-plaatjie": "sanna-keikantseeng-plaatjie",
"seeng-patricia-lebenya-ntanzi": "s-p-lebenya-ntanzi",
"sherphed-mayatula": "shepherd-malusi-mayatula",
"sherry-chen": "sheery-su-huei-cheng",
"sicelo-shiceka": "s-shiceka",
"siyabonga-cwele": "siyabonga-cyprian-cwele",
"stella-tembisa-ndabeni-abrahams": "stella-tembisa-ndabeni",
"suhla-james-masango": "s-j-masango",
"swaphi-h-plaatjie": "swaphi-hendrick-plaatjie",
"swaphi-plaatjie": "swaphi-hendrick-plaatjie",
"teboho-chaane": "teboho-edwin-chaane",
"thabo-makunyane": "thabo-lucas-makunyane",
"thandi-vivian-tobias-pokolo": "thandi-vivian-tobias",
"thembalani-waltemade-nxesi": "thembelani-waltermade-nxesi",
"tim-harris": "timothy-duncan-harris",
"tjheta-mofokeng": "tjheta-makwa-harry-mofokeng",
"tlp-nwamitwa-shilubana": "tinyiko-lwandlamuni-phillia-nwamitwa-shilubana",
"tovhowani-josephine-tshivhase": "t-j-tshivhase",
"trevor-john-bonhomme": "trevor-bonhomme",
"tshenuwani-simon-farisani": "t-s-farisani",
"tshiwela-elidah-lishivha": "tshiwela-elida-lishivha",
"velly-manzini": "velly-makasana-manzini",
"willem-faber": "willem-frederik-faber",
"willem-phillips-doman": "w-p-doman",
"zephroma-dubazana": "zephroma-sizani-dubazana",
"zephroma-sizani-dlamini-dubazana": "zephroma-sizani-dubazana",
"zisiwe-balindlela": "zisiwe-beauty-nosimo-balindlela",
"zoliswa-kota-fredericks": "zoliswa-albertina-kota-fredericks",
"zukiswa-rantho": "daphne-zukiswa-rantho",
"seiso-mohai": "seiso-joel-mohai",
"belinda-bozzoli-van-onsellen": "belinda-bozzoli",
"micheal-cardo": "michael-john-cardo",
"zephroma-dlamini-dubazana": "zephroma-sizani-dubazana",
"pravin-jamnadas-gordhan": "pravin-gordhan",
"barnard-joseph": "bernard-daniel-joseph",
"diane-kohler": "dianne-kohler-barnard",
"dean-macpherson": "dean-william-macpherson",
"thembekile-majola": "richard-majola",
"edwin-makue": "eddie-makue",
"mmoba-malatsi-seshoka": "mmoba-solomon-seshoka",
"suhla-masango": "bridget-staff-masango",
"lindiwe-maseko": "maseko-lindiwe",
"shipokosa-mashatile": "shipokasa-paulus-mashatile",
"comely-maxegwana": "humphrey-maxegwana",
"lungi-mnganga-gcabashe": "lungi-annette-mnganga-gcabashe",
"pumzile-mnguni": "phumzile-justice-mnguni",
"mohapi-mohapi": "mohapi-jihad-mohapi",
"charles-nqakula": "c-nqakula",
"bhekiziwe-radebe": "bhekiziswe-abram-radebe",
"david-ross": "david-christie-ross",
"olifile-sefako": "olefile-sefako",
"sheila-shope-sithole": "sheila-coleen-nkhensani-sithole",
"christiaan-smit": "christiaan-frederik-beyers-smit",
"makhotso-magdaline-sotyu": "makhotso-magdeline-sotyu",
"johnna-terblanche": "johanna-fredrika-juanita-terblanche",
"thandi-tobias-pokolo": "thandi-vivian-tobias",
"tshoganetso-tongwane-gasebonwe": "tshoganetso-mpho-adolphina-tongwane",
"shiella-xego-sovita": "sheilla-tembalam-xego-sovita",
"winile-zondi": "wp-zondi",
"lindiwe-zulu": "l-d-zulu",
"lungelwa-zwane": "ll-zwane",
"mamonare-chueu": "chueu-patricia",
"stanford-gana": "makashule-gana",
"hendrik-kruger": "hendrik-christiaan-crafford-kruger",
"dipuo-letsatsi-duba": "ms-letsatsi-duba-db",
"nomaindiya-mfeketo": "nomaindiya-cathleen-mfeketho",
"claudia-ndaba": "ndaba-nonhlanhla",
"maureen-scheepers": "m-scheepers",
"nomaindiya-cathleen-mfeketo": "nomaindiya-cathleen-mfeketho",
"tshoganetso-mpho-adolphina-gasebonwe": "tshoganetso-mpho-adolphina-gasebonwe-tongwane",
"mntomuhle-khawula": "m-khawula",
"thembekile-richard-majola": "richard-majola",
"natasha-mazzone": "natasha-wendy-anita-michael",
"zukiswa-ncitha": "zukiswa-veronica-ncitha",
"cathlene-labuschagne": "cathleen-labuschagne",
"tandi-gloria-mpambo-sibhukwana": "thandi-gloria-mpambo-sibhukwana",
"tandi-mpambo-sibhukwana": "thandi-gloria-mpambo-sibhukwana",
"marshall-mzingisi-dlamini": "mzingisi-marshall-dlamini",
"hlengiwe-octavia-maxon": "hlengiwe-octavia-hlophe",
"hlengiwe-maxon": "hlengiwe-octavia-hlophe",
"norbet-buthelezi": "sfiso-norbert-buthelezi",
"christian-hattingh": "chris-hattingh",
"karen-jooste-de-kock": "karen-de-kock",
"ntombovuyo-mente-nqweniso": "ntombovuyo-veronica-nqweniso",
"ockers-stefanus-terblanche": "ockert-stefanus-terblanche",
"patrick-maloyi": "nono-maloyi",
"ghaleb-cachalia": "ghaleb-cachalia",
"archibold-figlan": "a-m-figlan",
"hlengiwe-hlophe-maxon": "hlengiwe-octavia-hlophe",
"nkagisang-koni-mokgosi": "nkagisang-poppy-mokgosi",
"terrence-mpanza": "terence-skhumbuzo-mpanza",
"phoebe-abraham-ntantiso": "noxolo-abraham-ntantiso",
"hlengiwe-o-hlophe-mkhaliphi": "hlengiwe-octavia-hlophe",
"mohammed-hoosen": "mohammed-haniff-hoosen",
"rainey-hugo": "reiney-thamie-hugo",
"gwede-samson-mantashe": "gwede-mantashe",
"moses-masango": "moses-siphosezwe-amos-masango",
"joseph-mc-gluwa": "joseph-job-mcgluwa",
"lehlohonolo-mokoena": "lehlohonolo-goodwill-mokoena",
"busisiwe-ndlovu": "busisiwe-clarah-ndlovu",
"gwen-ngwenya": "amanda-ngwenya",
"neliswa-nkonyeni": "np-nkonyeni",
"hendrik-schmidt": "hendrik-cornelius-schmidt",
"zolile-xalisa": "zolile-roger-xalisa",
"thandiwe-alina-mfulo": "alina-mfulo",
"micheal-shackleton": "michael-stephen-shackleton",
#name changes confirmed in National Assembly membership document
"buyiswa-blaai": "buyiswa-cornelia-diemu",
"sanna-keikantseeng-molao": "sanna-keikantseeng-plaatjie",
# Garbage entries
"control-flag-ict": None,
}
category_sort_orders = {
"SHARES AND OTHER FINANCIAL INTERESTS": 1,
"REMUNERATED EMPLOYMENT OUTSIDE PARLIAMENT": 2,
"DIRECTORSHIP AND PARTNERSHIPS": 3,
"CONSULTANCIES OR RETAINERSHIPS": 4,
"SPONSORSHIPS": 5,
"GIFTS AND HOSPITALITY": 6,
"BENEFITS": 7,
"TRAVEL": 8,
"LAND AND PROPERTY": 9,
"PENSIONS": 10,
"CONTRACTS": 11,
"TRUSTS": 12,
"ENCUMBERANCES": 13,
}
def __init__(self, filename):
    # Path to the scraped register-of-interests JSON file to convert.
    self.filename = filename
def convert(self):
    """Run the full conversion: load the scraped JSON, extract the
    release metadata and the per-member entries, then serialise the
    combined groupings as import-ready JSON."""
    raw = self.extract_data_from_json()
    self.extract_release(raw)
    self.extract_entries(raw)
    return self.produce_json()
def extract_release(self, data):
    """Store release metadata (name, date, source URL) on self.release
    for later inclusion in every grouping.

    Fix: the original also derived a human-readable source name from the
    URL (source_filename/source_name) but never used it; the dead code
    has been removed.
    """
    source_url = data['source']
    year = data['year']
    date = data['date']
    self.release = {
        "name": "Parliament Register of Members' Interests " + year,
        "date": date,
        "source_url": source_url,
    }
def extract_entries(self, data):
    """Walk every scraped register entry, clean the data in place
    (whitespace, heading fixes, ditto-mark resolution) and append one
    grouping per non-empty disclosure category to self.groupings."""
    for register_entry in data['register']:
        for raw_category_name, entries in register_entry.items():
            # we only care about entries that are arrays
            if type(entries) != list:
                continue
            # go through all entries stripping off extra whitespace from
            # keys and values
            for entry in entries:
                for key in entry.keys():
                    # correct common scraper heading error
                    key_to_use = key.strip()
                    if key_to_use == 'Benefits' and raw_category_name.strip() == "TRUSTS":
                        key_to_use = "Details Of Benefits"
                    entry[key_to_use] = entry.pop(key).strip()
                if entry.get('No') == 'Nothing to disclose':
                    del entry['No']
            # Need to be smart about values that are just '"' as these are dittos of the previous entries.
            previous_entries = []
            for entry in entries:
                if len(previous_entries):
                    for key in entry.keys():
                        if entry[key] in self.ditto_marks:
                            # Walk backwards to the most recent entry
                            # that actually has this key.
                            for previous in reversed(previous_entries):
                                if key in previous:
                                    entry[key] = previous[key]
                                    break
                            # Replacement may not have been found, warn
                            # if entry[key] in self.ditto_marks:
                            #     sys.stderr.write("----------- Could not find previous entry for ditto mark of '{0}'\n".format(key))
                            #     sys.stderr.write(str(previous_entries) + "\n")
                            #     sys.stderr.write(str(entry) + "\n")
                previous_entries.append(entry)
            # Filter out entries that are empty
            entries = [ e for e in entries if len(e) ]
            if len(entries) == 0:
                continue
            grouping = {
                "release": self.release,
                "entries": entries,
            }
            # Extract the category name we are interested in
            # (strip whitespace and a leading "<number>. " prefix).
            category_name = raw_category_name.strip()
            category_name = re.sub(r'^\d+\.\s*', '', category_name)
            grouping['category'] = {
                "sort_order": self.category_sort_orders[category_name],
                "name": category_name,
            }
            # Work out who the person is
            person_slug = self.mp_to_person_slug(register_entry['mp'])
            if not person_slug:
                continue  # skip if no slug
            grouping['person'] = {
                "slug": person_slug
            }
            self.groupings.append(grouping)
            # break # just for during dev
def mp_to_person_slug(self, mp):
    """Resolve an MP string like "Surname, Names (PARTY)" to a Person
    slug, trying: (1) slugified name + known corrections, (2) a
    both-name-parts slug query, (3) a last-name scan that either
    records a correction (when finding_slug_corrections) or raises."""
    muddled_name, party = re.search(r'^(.*)\s\(+(.*?)\)+', mp).groups()
    # "Surname, Names" -> "Names Surname"
    name = re.sub(r'(.*?), (.*)', r'\2 \1', muddled_name)
    slug = slugify(name)
    # Check if there is a known correction for this slug
    slug = self.slug_corrections.get(slug, slug)
    # Sometimes we know we can't resolve the person
    if slug is None:
        return None
    try:
        person = Person.objects.get(slug=slug)
        return person.slug
    except Person.DoesNotExist:
        try:
            # Fall back to matching both name parts anywhere in the slug.
            name_parts = re.findall(r'(.*?), (.*)', muddled_name)
            person = Person.objects.get(Q(slug__contains=slugify(name_parts[0][0])) & Q(slug__contains=slugify(name_parts[0][1])))
            return person.slug
        except Person.DoesNotExist:
            # Last resort: list candidates sharing the last name so a
            # human can add a slug_corrections entry.
            last_name = name.split(' ')[-1]
            possible_persons = Person.objects.filter(legal_name__icontains=last_name)
            if self.finding_slug_corrections and possible_persons.count() == 1:
                possible_slug = possible_persons.all()[0].slug
                self.slug_corrections[slug] = possible_slug
                return possible_slug
            # for/else: the else branch runs when there are no candidates.
            for person in possible_persons:
                print 'perhaps: "{0}": "{1}",'.format(slug, person.slug)
            else:
                print "no possible matches for {0}".format(slug)
            raise Exception("Slug {0} not found, please find matching slug and add it to the slug_corrections".format(slug))
def produce_json(self):
    """Serialise the combined groupings as pretty-printed JSON, with
    trailing whitespace stripped from every line to keep diffs clean."""
    combined = self.combine_data(self.groupings)
    serialised = json.dumps(combined, indent=4, sort_keys=True)
    return re.sub(r' *$', '', serialised, flags=re.M)
def combine_data(self, data):
    """
    Manipulate the data so that there are no duplicates of person and
    category, and sort data so that it is diff-able.
    """
    def sort_key(grouping):
        return grouping['person']['slug'] + ':' + grouping['category']['name']

    combined = []
    for grouping in sorted(data, key=sort_key):
        # Merge into the previous grouping when it is for the same
        # person and category; otherwise start a new one.
        previous = combined[-1] if combined else None
        if (previous is not None
                and previous['person']['slug'] == grouping['person']['slug']
                and previous['category']['name'] == grouping['category']['name']):
            previous['entries'].extend(grouping['entries'])
        else:
            combined.append(grouping)
    return combined
def extract_data_from_json(self):
    """Load and return the scraped register data from self.filename."""
    with open(self.filename) as json_file:
        return json.load(json_file)
if __name__ == "__main__":
    # Usage: convert_to_import_json.py <scraped.json>  (prints import JSON)
    converter = Converter(sys.argv[1])
    output = converter.convert()
    print output
    # In discovery mode, also dump the corrections learned during the run.
    if converter.finding_slug_corrections:
        print "\n\n"
        print "#### COPY THIS TO slug_corrections and s/null/None/ :) ####"
        print "\n\n"
        print json.dumps(converter.slug_corrections, indent=4, sort_keys=True)
        print "\n\n"
|
mysociety/pombola
|
pombola/south_africa/data/members-interests/convert_to_import_json.py
|
Python
|
agpl-3.0
| 22,213
|
[
"Brian"
] |
a5c495c994d874af1c7d9c5d172b537e50ad045a3a7a93465583fc1899243132
|
"""
Module for Continuous-Time Recurrent Neural Networks.
This pure python version solves the differential equations
using a simple Forward-Euler method. For a higher precision
method, use the C++ extension with 4th order Runge-Kutta.
"""
from neat.nn import nn_pure as nn
try:
import psyco; psyco.full()
except ImportError:
pass
class CTNeuron(nn.Neuron):
    """ Continuous-time neuron model based on:

        Beer, R. D. and Gallagher, J.C. (1992).
        Evolving Dynamical Neural Networks for Adaptive Behavior.
        Adaptive Behavior 1(1):91-122.
    """
    def __init__(self, neurontype, id = None, bias = 0.0, response = 1.0, activation_type = 'exp', tau = 1.0):
        super(CTNeuron, self).__init__(neurontype, id, bias, response, activation_type)

        # decay rate
        self.__tau = tau
        # needs to set the initial state (initial condition for the ODE)
        self.__state = 0.1 #TODO: Verify what's the "best" initial state
        # first output
        self._output = nn.sigmoid(self.__state + self._bias, self._response, self._activation_type)
        # integration step
        self.__dt = 0.05 # depending on the tau constant, the integration step must
                         # be adjusted accordingly to avoid numerical instability

    def set_integration_step(self, step):
        """Set the Forward-Euler integration step size."""
        self.__dt = step

    def set_init_state(self, state):
        """Set the initial ODE state and recompute the neuron's output."""
        self.__state = state
        self._output = nn.sigmoid(self.__state + self._bias, self._response, self._activation_type)

    def activate(self):
        """ Updates neuron's state for a single time-step. """
        # Bug fix: the original used "self._type is not 'INPUT'", an
        # identity comparison against a string literal (SyntaxWarning on
        # modern Pythons and implementation-dependent); use equality.
        assert self._type != 'INPUT'
        self.__update_state()
        return nn.sigmoid(self.__state + self._bias, self._response, self._activation_type)

    def __update_state(self):
        """ Advances neuron's state by one step using Forward-Euler. """
        self.__state += self.__dt*(1.0/self.__tau)*(-self.__state + self._update_activation())
def create_phenotype(chromo):
    """ Receives a chromosome and returns its phenotype (a CTRNN). """
    neurons = []
    for ng in chromo._node_genes:
        neurons.append(CTNeuron(ng._type, ng._id, ng._bias, ng._response,
                                ng._activation_type, ng._time_constant))
    # Only enabled connection genes become synapses.
    connections = [(cg.innodeid, cg.outnodeid, cg.weight)
                   for cg in chromo.conn_genes if cg.enabled]
    return nn.Network(neurons, connections, chromo.sensors)
if __name__ == "__main__":
    # This example follows from Beer's C++ source code available at:
    # http://mypage.iu.edu/~rdbeer/

    # create two output neurons (they won't receive any external inputs)
    N1 = CTNeuron('OUTPUT', 1, -2.75, 1.0, 'exp', 0.5)
    N2 = CTNeuron('OUTPUT', 2, -1.75, 1.0, 'exp', 0.5)
    N1.set_init_state(-0.084000643)
    N2.set_init_state(-0.408035109)

    neurons_list = [N1, N2]
    # create some synapses
    conn_list = [(1, 1, 4.5), (1, 2, -1.0), (2, 1, 1.0), (2, 2, 4.5)]
    # create the network
    net = nn.Network(neurons_list, conn_list)
    # activates the network: print initial outputs, then 1000 steps
    print("%.17f %.17f" %(N1._output, N2._output))
    for i in range(1000):
        #print net.pactivate()
        output = net.pactivate()
        print("%.17f %.17f" %(output[0], output[1]))
|
davidmfinol/py3NEAT
|
neat/ctrnn/ctrnn_pure.py
|
Python
|
gpl-3.0
| 3,379
|
[
"NEURON"
] |
9fff27414ec6390bd7e382d0e2a6084f141766bb0cd065d3b52b674848523dc6
|
#! /usr/bin/env python3
#
# Author: Martin Schreiber
# Email: schreiberx@gmail.com
# Date: 2017-06-17
#
import sys
import math
import cmath
import os
d = os.path.dirname(os.path.realpath(__file__))
sys.path.append(d+'/..')
import EFloat as ef
from trexi.TREXI_GaussianCoefficients import *
from trexi.TREXI_GaussianPhi0 import *
sys.path.pop()
class TREXI_GaussianPhi0:
    """Coefficients b_m for the Gaussian-basis phi0 approximation used by
    the T-REXI scheme, evaluated with the configured EFloat precision.
    """

    def __init__(
        self,
        gaussphi0_N,                        # required argument
        gaussphi0_basis_function_spacing,   # required argument
        floatmode=None
    ):
        self.efloat = ef.EFloat(floatmode)
        self.h = self.efloat.to(gaussphi0_basis_function_spacing)
        self.M = int(gaussphi0_N)

        # Coefficients b_m for m = -M..M (inclusive).  The exp(h*h)
        # factor does not depend on m, so compute it once.
        common = self.efloat.exp(self.h * self.h)
        self.b = []
        for m in range(-self.M, self.M + 1):
            self.b.append(common * self.efloat.exp(-1j * (float(m) * self.h)))

        # Generate dummy Gaussian function
        fafcoeffs = TREXI_GaussianCoefficients()
        fafcoeffs.function_name = 'gaussianorig'
        fafcoeffs.function_scaling = self.efloat.to(1.0)

    def output(self):
        """Print each coefficient b_m, one per line."""
        for coefficient in self.b:
            print(coefficient)

    def fun(self, i_x):
        """Evaluate exp(i * i_x) with the configured EFloat backend."""
        return self.efloat.exp(self.efloat.i * i_x)
|
schreiberx/sweet
|
mule_local/python/mule_local/rexi/trexi/TREXI_GaussianPhi0.py
|
Python
|
mit
| 1,230
|
[
"Gaussian"
] |
e6104d4e78985eaadf87dd696669476604fe455daad5d8283b291e695d08447b
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.dialogs.invoicedialog import SaleInvoicePrinterDialog
from stoqlib.gui.test.uitestutils import GUITest
class TestSaleInvoicePrinterDialog(GUITest):
    """GUI test for :class:`SaleInvoicePrinterDialog`."""

    def test_show(self):
        # Build the minimal fixtures the dialog needs, open it, and
        # compare against the stored UI snapshot.
        new_sale = self.create_sale()
        new_printer = self.create_invoice_printer()
        dialog = SaleInvoicePrinterDialog(self.store, new_sale, new_printer)
        self.check_editor(dialog, 'dialog-sale-invoice-show')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_saleinvoiceprinterdialog.py
|
Python
|
gpl-2.0
| 1,298
|
[
"VisIt"
] |
bb69dd2df0d7bb2b41035150e8e18c7f52e9526ae99b04992eacb9d9e406f090
|
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 7 11:33:27 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import CGATPipelines.Pipeline as P
import CGATPipelines
################################################################
# Options related to CGAT pipelines
# path were documentation source resides.
# Use environment variable SPHINX_DOCSDIR.
# If unset, take the location of CGATPipelines
# Locate the documentation sources: use the SPHINX_DOCSDIR environment
# variable if set, otherwise fall back to the 'pipeline_docs' directory
# shipped with CGATPipelines.
docsdir = os.environ.get("SPHINX_DOCSDIR",
                         os.path.join(os.path.dirname(CGATPipelines.__file__),
                                      'pipeline_docs'))
if not os.path.exists(docsdir):
    raise ValueError("documentation directory '%s' not found" % docsdir)

# Directory with the bundled sphinx themes and the CGAT logo used below.
themedir = os.path.join(os.path.dirname(CGATPipelines.__file__),
                        'pipeline_docs',
                        'themes')
logopath = os.path.join(themedir, "cgat_logo.png")

# PATH where code for pipelines is stored.
pipelinesdir = os.path.dirname(CGATPipelines.__file__)

# The default configuration file - 'inifile' is read by sphinx-report.
# A 'pipeline.ini' in the working directory (second entry) takes
# precedence over the shipped defaults.
inifile = os.path.join(os.path.dirname(CGATPipelines.__file__),
                       'configuration',
                       'pipeline.ini')
PARAMS = P.getParameters([inifile, "pipeline.ini"])
# Definition now part of CGATReport
# def setup(app):
# app.add_config_value('PARAMS', {}, True)
################################################################
################################################################
################################################################
# The pipeline assumes that sphinxreport is called within the
# working directory. If the report is in a separate build directory,
# change the paths below.
#
# directory with export directory from pipeline
# This should be a directory in the build directory - you can
# link from here to a directory outside the build tree, though.
# Directories with pipeline results for the report; both must be
# configured in pipeline.ini ('exportdir' / 'datadir').  The pipeline
# assumes sphinxreport is called within the working directory; if the
# report lives in a separate build directory, adjust these paths.
exportdir = os.path.abspath(PARAMS['exportdir'])
datadir = os.path.abspath(PARAMS['datadir'])

# -- sphinx options --------------------------------------------------------

# General information about the project, taken from the pipeline
# configuration.
# NOTE(review): project/copyright/version/release are re-assigned further
# down with sphinx-quickstart boilerplate values, silently overriding the
# PARAMS-derived settings here — confirm which set is intended.
project = PARAMS['projectname']
copyright = PARAMS['copyright']
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|.
# The short X.Y version.
version = PARAMS['version']
# The full version, including alpha/beta/rc tags.
release = PARAMS['release']
# Make the local directory, the pipelines and the trackers importable by
# autodoc and the report directives.
sys.path = [os.path.abspath('.'),
            pipelinesdir,
            os.path.abspath('%s/trackers' % docsdir)] + sys.path
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx and CGATReport extensions used to build the report.
# NOTE(review): 'sphinx.ext.pngmath' was removed in Sphinx 1.8 (replaced
# by 'sphinx.ext.imgmath') — this configuration targets an older Sphinx.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.ifconfig',
              'sphinx.ext.intersphinx',
              'CGATReport.only_directives',
              'CGATReport.report_directive',
              'sphinx.ext.inheritance_diagram',
              'CGATReport.errors_directive',
              'CGATReport.warnings_directive',
              'CGATReport.roles']

# An optional [intersphinx] section in the pipeline configuration maps
# inventory names to (local) documentation paths.
if P.CONFIG.has_section('intersphinx'):
    intersphinx_mapping = dict(
        [(x, (os.path.abspath(y), None))
         for x, y in P.CONFIG.items('intersphinx')])
# Included at the end of each rst file: shared external link targets so
# every page can reference them without re-declaring.
rst_epilog = '''
.. _CGAT: http://www.cgat.org
.. _CGAT Training Programme: http://www.cgat.org
.. _CGAT Pipelines: https://www.cgat.org/downloads/public/cgat/documentation/Pipelines.html#pipelines
.. _CGAT Scripts: https://www.cgat.org/downloads/public/cgat/documentation/cgat.html#cgat
.. _pysam: http://code.google.com/p/pysam/
.. _samtools: http://samtools.sourceforge.net/
.. _tabix: http://samtools.sourceforge.net/tabix.shtml/
.. _Galaxy: https://main.g2.bx.psu.edu/
.. _cython: http://cython.org/
.. _python: http://python.org/
.. _pyximport: http://www.prescod.net/pyximport/
.. _sphinx: http://sphinx-doc.org/
.. _ruffus: http://www.ruffus.org.uk/
.. _sphinxreport: http://code.google.com/p/sphinx-report/
.. _sqlite: http://www.sqlite.org/
.. _make: http://www.gnu.org/software/make
.. _UCSC: http://genome.ucsc.edu
.. _ENSEMBL: http://www.ensembl.org
.. _GO: http://www.geneontology.org
.. _gwascatalog: http://www.genome.gov/gwastudies/
.. _distlid: http://distild.jensenlab.org/
.. _mysql: https://mariadb.org/
.. _postgres: http://www.postgresql.org/
.. _bedtools: http://bedtools.readthedocs.org/en/latest/
.. _UCSC Tools: http://genome.ucsc.edu/admin/git.html
.. _git: http://git-scm.com/
.. _sge: http://wikis.sun.com/display/GridEngine/Home
.. _alignlib: https://github.com/AndreasHeger/alignlib
'''
# Add any paths that contain templates here, relative to this directory.
templates_path = [os.path.relpath('%s/_templates' % docsdir)]
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
# NOTE(review): these sphinx-quickstart defaults override the
# PARAMS-derived 'project'/'copyright' assigned earlier in this file —
# confirm which values are intended.
project = u'test'
copyright = u'2014, %CGAT%'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): overrides the PARAMS-derived 'version'/'release' set
# earlier in this file — confirm which values are intended.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.  None means English / no translation.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# BUG FIX: the original assigned exclude_patterns twice in a row, so the
# second assignment silently dropped '_build'; keep both patterns.
exclude_patterns = ['_build', "**/.*.rst"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style used for code
# blocks in the rendered documentation.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.  'cgat' is a custom theme shipped with
# CGATPipelines (see html_theme_path below).
html_theme = 'cgat'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# 'themedir' points into the CGATPipelines installation (set above).
html_theme_path = [themedir]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar — here, the CGAT logo resolved earlier.
html_logo = logopath
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.  None are used here.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoc'

# -- Options for LaTeX output ---------------------------------------------
# All LaTeX options are left at their defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'test.tex', u'test Documentation',
     u'\\%CGAT\\%', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 is the conventional section for user commands.
man_pages = [
    ('index', 'test', u'test Documentation',
     [u'%CGAT%'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'test', u'test Documentation',
     u'%CGAT%', 'test', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
MikeDMorgan/scRNAseq
|
pipeline_scRnaseq/conf.py
|
Python
|
mit
| 13,218
|
[
"pysam"
] |
db2892bd69eb3d7f10d8ad37d8fb78f08969434938a2586361e33457f788b06e
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.