| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""Main PAC class."""
import numpy as np
import logging
from tensorpac.spectral import spectral, hilbertm
from tensorpac.methods import (get_pac_fcn, pacstr, compute_surrogates,
erpac, ergcpac, _ergcpac_perm, preferred_phase,
normalize)
from tensorpac.gcmi import copnorm
from tensorpac.visu import _PacVisual, _PacPlt, _PolarPlt
from tensorpac.io import set_log_level
from tensorpac.config import CONFIG
logger = logging.getLogger('tensorpac')
class _PacObj(object):
"""Main class for relative PAC objects."""
def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
cycle=(3, 6), width=7):
# Frequency checking :
from tensorpac.utils import pac_vec
self._f_pha, self._f_amp = pac_vec(f_pha, f_amp)
self._xvec, self._yvec = self.f_pha.mean(1), self.f_amp.mean(1)
# Check spectral properties :
self._speccheck(dcomplex, cycle, width)
def __str__(self):
"""String representation."""
return self.method
def filter(self, sf, x, ftype='phase', keepfilt=False, edges=None,
n_jobs=-1):
"""Filt the data in the specified frequency bands.
Parameters
----------
sf : float
The sampling frequency.
x : array_like
Array of data of shape (n_epochs, n_times)
ftype : {'phase', 'amplitude'}
Specify if you want to extract phase ('phase') or the amplitude
('amplitude').
n_jobs : int | -1
Number of jobs to compute PAC in parallel. For very large data,
set this parameter to 1 in order to prevent large memory usage.
keepfilt : bool | False
Specify if you only want the filtered data (True). This parameter
is only available with dcomplex='hilbert' and not wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
Returns
-------
xfilt : array_like
The filtered data of shape (n_freqs, n_epochs, n_times)
"""
# ---------------------------------------------------------------------
# check inputs
assert isinstance(sf, (int, float)), ("The sampling frequency must be "
"a float number.")
# Compatibility between keepfilt and wavelet :
if (keepfilt is True) and (self._dcomplex == 'wavelet'):
raise ValueError("Using wavelets for the complex decomposition "
"does not allow returning the filtered data "
"only. Set the keepfilt parameter to False or "
"set dcomplex to 'hilbert'.")
assert ftype in ['phase', 'amplitude'], ("ftype must either be 'phase'"
" or 'amplitude'.")
mne_epochs_type = CONFIG['MNE_EPOCHS_TYPE']
if not isinstance(x, np.ndarray) and type(x) in mne_epochs_type:
sf = x.info['sfreq']
x = x.get_data()
if x.ndim == 1:
x = x[np.newaxis, :]
assert x.ndim == 2, ("x should be a 2d array like (n_epochs, n_times)")
# check edges
if not isinstance(edges, int):
edges = slice(None)
else:
logger.debug(f" Edges {edges} time samples ignored")
edges = slice(edges, -edges)
self._edges = edges
# ---------------------------------------------------------------------
# Switch between phase or amplitude :
if ftype == 'phase':
tosend = 'pha' if not keepfilt else None
xfilt = spectral(x, sf, self.f_pha, tosend, self._dcomplex,
self._cycle[0], self._width, n_jobs)
elif ftype == 'amplitude':
tosend = 'amp' if not keepfilt else None
xfilt = spectral(x, sf, self.f_amp, tosend, self._dcomplex,
self._cycle[1], self._width, n_jobs)
return xfilt[..., edges]
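# Illustrative usage sketch (not part of the original file): with a Pac
# instance `p`, a sampling rate `sf` and an array `x` of shape
# (n_epochs, n_times) -- all hypothetical names -- phases and amplitudes can
# be extracted once and then reused across several `fit` calls:
# >>> pha = p.filter(sf, x, ftype='phase')
# >>> amp = p.filter(sf, x, ftype='amplitude')
# >>> xpac = p.fit(pha, amp)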
def _speccheck(self, dcomplex=None, cycle=None, width=None):
"""Check spectral parameters."""
# Check cycle :
if cycle is not None:
cycle = np.asarray(cycle)
if (len(cycle) != 2) or (cycle.dtype != int):
raise ValueError("Cycle must be a tuple of two integers.")
else:
self._cycle = cycle
# Check complex decomposition :
if dcomplex is not None:
if dcomplex not in ['hilbert', 'wavelet']:
raise ValueError("dcomplex must either be 'hilbert' or "
"'wavelet'.")
else:
self._dcomplex = dcomplex
# Convert Morlet's width :
if width is not None:
self._width = int(width)
@staticmethod
def _phampcheck(pha, amp):
"""Check phase and amplitude values."""
assert pha.ndim == 3, ("`pha` should have a shape of (n_pha, n_epochs,"
" n_times)")
assert amp.ndim == 3, ("`amp` should have a shape of (n_amp, n_epochs,"
" n_times)")
assert pha.shape[1:] == amp.shape[1:], ("`pha` and `amp` must have the"
" same number of trials, "
"channels and time points")
if not np.ptp(pha) <= 2 * np.pi:
logger.error("Your phase is probably in degrees and should be "
"converted in radians using either np.degrees or "
"np.deg2rad.")
# force the phase to be in [-pi, pi]
pha = (pha + np.pi) % (2. * np.pi) - np.pi
return pha, amp
def _infer_pvalues(self, effect, perm, p=.05, mcp='maxstat'):
"""Global function for statistical inferences.
In order to work this method requires :
* effect = array like of shape (n_dims...)
* perm = array like of shape (n_perm, n_dims...)
"""
assert all([isinstance(k, np.ndarray) for k in (effect, perm)])
n_perm = perm.shape[0]
# compute the minimum number of required permutations
n_perm_req = int(10. / p)
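# e.g. p=.05 requires at least 10 / .05 = 200 permutations, p=.01 at least 1000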
if n_perm < n_perm_req:
logger.warning(f"For inferences at p<{p}, it is recommended to per"
f"form at least n_perm={n_perm_req} permutations")
# ---------------------------------------------------------------------
logger.info(f" infer p-values at (p={p}, mcp={mcp})")
# computes the pvalues
if mcp == 'maxstat':
max_p = perm.reshape(n_perm, -1).max(1)[np.newaxis, ...]
nb_over = (effect[..., np.newaxis] <= max_p).sum(-1)
pvalues = nb_over / n_perm
# non-signi. p-values are set to 1. and min(pvalues) = 1 / n_perm
pvalues[pvalues >= p] = 1.
pvalues = np.maximum(1. / n_perm, pvalues)
elif mcp in ['fdr', 'bonferroni']:
from mne.stats import fdr_correction, bonferroni_correction
fcn = fdr_correction if mcp == 'fdr' else bonferroni_correction
# compute the p-values
pvalues = (effect[np.newaxis, ...] <= perm).sum(0) / n_perm
pvalues = np.maximum(1. / n_perm, pvalues)
# apply correction
is_signi, pvalues = fcn(pvalues, alpha=p)
pvalues[~is_signi] = 1.
return pvalues
@property
def f_pha(self):
"""Vector of phases of shape (n_pha, 2)."""
return self._f_pha
@property
def f_amp(self):
"""Vector of amplitudes of shape (n_amp, 2)."""
return self._f_amp
@property
def xvec(self):
"""Vector of phases of shape (n_pha,) use for plotting."""
return self._xvec
@property
def yvec(self):
"""Vector of amplitudes of shape (n_amp,) use for plotting."""
return self._yvec
# ----------- DCOMPLEX -----------
@property
def dcomplex(self):
"""Get the dcomplex value."""
return self._dcomplex
@dcomplex.setter
def dcomplex(self, value):
"""Set dcomplex value."""
self._speccheck(dcomplex=value)
# ----------- CYCLE -----------
@property
def cycle(self):
"""Get the cycle value."""
return self._cycle
@cycle.setter
def cycle(self, value):
"""Set cycle value."""
self._speccheck(cycle=value)
# ----------- WIDTH -----------
@property
def width(self):
"""Get the width value."""
return self._width
@width.setter
def width(self, value):
"""Set width value."""
self._width = value
class Pac(_PacObj, _PacPlt):
"""Compute Phase-Amplitude Coupling (PAC).
Computing PAC is performed in three steps : compute the true PAC, compute
the surrogates and finally, because PAC is very sensitive to noise, correct
the true PAC by the surrogates. This implementation is modular i.e. it
lets you choose among a large range of possible combinations.
Parameters
----------
idpac : tuple/list | (1, 2, 3)
Choose the combination of methods to use in order to extract PAC.
This tuple must be composed of three integers where each one of them
refers to :
* First digit : refer to the pac method
- 1 : Mean Vector Length (MVL) :cite:`canolty2006high`
(see :func:`tensorpac.methods.mean_vector_length`)
- 2 : Modulation Index (MI) :cite:`tort2010measuring`
(see :func:`tensorpac.methods.modulation_index`)
- 3 : Heights Ratio (HR) :cite:`lakatos2005oscillatory`
(see :func:`tensorpac.methods.heights_ratio`)
- 4 : ndPAC :cite:`ozkurt2012statistically`
(see :func:`tensorpac.methods.norm_direct_pac`)
- 5 : Phase-Locking Value (PLV)
:cite:`penny2008testing,lachaux1999measuring`
(see :func:`tensorpac.methods.phase_locking_value`)
- 6 : Gaussian Copula PAC (GCPAC) :cite:`ince2017statistical`
(see :func:`tensorpac.methods.gauss_cop_pac`)
* Second digit : refer to the method for computing surrogates
- 0 : No surrogates
- 1 : Swap phase / amplitude across trials
:cite:`tort2010measuring`
(see :func:`tensorpac.methods.swap_pha_amp`)
- 2 : Swap amplitude time blocks
:cite:`bahramisharif2013propagating`
(see :func:`tensorpac.methods.swap_blocks`)
- 3 : Time lag :cite:`canolty2006high`
(see :func:`tensorpac.methods.time_lag`)
* Third digit : refer to the normalization method for correction
- 0 : No normalization
- 1 : Subtract the mean of surrogates
- 2 : Divide by the mean of surrogates
- 3 : Subtract then divide by the mean of surrogates
- 4 : Z-score
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
* Using a string. `f_pha` and `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors. In that
case, it uses the definition proposed by Bahramisharif et al.
2013 :cite:`bahramisharif2013propagating` i.e
f_pha = [f - f / 4, f + f / 4] and f_amp = [f - f / 8, f + f / 8]
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude :cite:`bahramisharif2013propagating`.
width : int | 7
Width of the Morlet's wavelet.
n_bins : int | 18
Number of bins for the KLD and HR PAC method
:cite:`tort2010measuring,lakatos2005oscillatory`
"""
def __init__(self, idpac=(1, 2, 3), f_pha=[2, 4], f_amp=[60, 200],
dcomplex='hilbert', cycle=(3, 6), width=7, n_bins=18,
verbose=None):
"""Check and initialize."""
set_log_level(verbose)
self._idcheck(idpac)
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
cycle=cycle, width=width)
_PacPlt.__init__(self)
self.n_bins = int(n_bins)
logger.info("Phase Amplitude Coupling object defined")
def fit(self, pha, amp, n_perm=200, p=.05, mcp='maxstat', n_jobs=-1,
random_state=None, verbose=None):
"""Compute PAC on filtered data.
Parameters
----------
pha : array_like
Array of phases of shape (n_pha, n_epochs, n_times).
Angles should be in rad.
amp : array_like
Array of amplitudes of shape (n_amp, n_epochs, n_times).
n_perm : int | 200
Number of surrogates to compute.
p : float | 0.05
Statistical threshold
mcp : {'maxstat', 'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. Use either :
* 'maxstat' : maximum statistics
* 'fdr' : FDR correction (need MNE-Python)
* 'bonferroni' : Bonferroni correction (need MNE-Python)
n_jobs : int | -1
Number of jobs to compute PAC in parallel. For very large data,
set this parameter to 1 in order to prevent large memory usage.
random_state : int | None
Fix the random state of the machine for reproducible results.
Returns
-------
pac : array_like
Phase-Amplitude Coupling measure of shape (n_amp, n_pha, n_epochs)
Attributes
----------
pac : array_like
Unnormalized Phase-Amplitude Coupling measure of shape (n_amp,
n_pha, n_epochs)
pvalues : array_like
Array of p-values of shape (n_amp, n_pha)
surrogates : array_like
Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)
"""
set_log_level(verbose)
# ---------------------------------------------------------------------
# input checking
pha, amp = self._phampcheck(pha, amp)
self._pvalues, self._surrogates = None, None
# for the plv, extract the phase of the amplitude
if self._idpac[0] == 5:
amp = np.angle(hilbertm(amp))
# ---------------------------------------------------------------------
# check if permutations should be computed
if self._idpac[1] == 0:
n_perm = None
if not isinstance(n_perm, int) or not (n_perm > 0):
self._idpac = (self._idpac[0], 0, 0)
compute_surro = False
else:
compute_surro = True
# ---------------------------------------------------------------------
# copnorm if gaussian copula is used
if self._idpac[0] == 6:
logger.debug(f" copnorm the phase and the amplitude")
pha = copnorm(np.stack([np.sin(pha), np.cos(pha)], axis=-2))
amp = copnorm(amp[..., np.newaxis, :])
# ---------------------------------------------------------------------
# true pac estimation
logger.info(f' true PAC estimation using {self.method}')
fcn = get_pac_fcn(self.idpac[0], self.n_bins, p)
pac = fcn(pha, amp)
self._pac = pac.copy()
# ---------------------------------------------------------------------
# compute surrogates (if needed)
if compute_surro:
if random_state is None:
random_state = int(np.random.randint(0, 10000, size=1))
logger.info(f" compute surrogates ({self.str_surro}, {n_perm} "
f"permutations, random_state={random_state})")
surro = compute_surrogates(pha, amp, self.idpac[1], fcn, n_perm,
n_jobs, random_state)
self._surrogates = surro
# infer pvalues
self.infer_pvalues(p, mcp=mcp)
# ---------------------------------------------------------------------
# normalize (if needed)
if self._idpac[2] != 0:
# Get the mean / deviation of surrogates
logger.info(" normalize true PAC estimation by surrogates "
f"({self.str_norm})")
normalize(self.idpac[2], pac, surro)
return pac
def filterfit(self, sf, x_pha, x_amp=None, n_perm=200, p=.05,
mcp='maxstat', edges=None, n_jobs=-1, random_state=None,
verbose=None):
"""Filt the data then compute PAC on it.
Parameters
----------
sf : float
The sampling frequency.
x_pha, x_amp : array_like
Array of data for computing PAC. x_pha is the data used for
extracting phases and x_amp, amplitudes. Both arrays must have
the same shapes. If you want to compute PAC locally i.e. on the
same electrode, x=x_pha=x_amp. For distant coupling, x_pha and
x_amp can be different but must still have the same shape.
n_perm : int | 200
Number of surrogates to compute.
p : float | 0.05
Statistical threshold
mcp : {'maxstat', 'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. Use either :
* 'maxstat' : maximum statistics
* 'fdr' : FDR correction (need MNE-Python)
* 'bonferroni' : Bonferroni correction (need MNE-Python)
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
n_jobs : int | -1
Number of jobs to compute PAC in parallel. For very large data,
set this parameter to 1 in order to prevent large memory usage.
random_state : int | None
Fix the random state of the machine for reproducible results.
Returns
-------
pac : array_like
Phase-Amplitude Coupling measure of shape (namp, npha, ...).
Attributes
----------
pac : array_like
Unnormalized Phase-Amplitude Coupling measure of shape (n_amp,
n_pha, n_epochs)
pvalues : array_like
Array of p-values of shape (n_amp, n_pha)
surrogates : array_like
Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)
"""
# Check if amp is None :
if x_amp is None:
x_amp = x_pha
# Shape checking :
assert x_pha.shape == x_amp.shape, ("Inputs `x_pha` and `x_amp` must "
"have the same shape.")
# Extract phase (npha, ...) and amplitude (namp, ...) :
logger.info(f" extract phases (n_pha={len(self.xvec)}) and "
f"amplitudes (n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, edges=edges, n_jobs=1)
pha = self.filter(sf, x_pha, 'phase', **kw)
amp = self.filter(sf, x_amp, 'amplitude', **kw)
# Special cases :
if self._idpac[0] == 5:
amp = np.angle(hilbertm(amp))
# Compute pac :
return self.fit(pha, amp, p=p, mcp=mcp, n_perm=n_perm, n_jobs=n_jobs,
random_state=random_state, verbose=verbose)
def infer_pvalues(self, p=0.05, mcp='maxstat'):
"""Infer p-values based on surrogate distribution.
Parameters
----------
p : float | 0.05
Significance threshold
mcp : {'maxstat', 'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. Use either :
* 'maxstat' : maximum statistics
* 'fdr' : FDR correction (need MNE-Python)
* 'bonferroni' : Bonferroni correction (need MNE-Python)
Returns
-------
pvalues : array_like
Array of p-values of shape (n_amp, n_pha)
"""
# ---------------------------------------------------------------------
# check that pac and surrogates have already been computed
assert hasattr(self, 'pac'), ("You should compute PAC first. Use the "
"`fit` method")
assert hasattr(self, 'surrogates'), "No surrogates computed"
# mean pac and surrogates across trials
m_pac, m_surro = self.pac.mean(2), self.surrogates.mean(3)
self._pvalues = self._infer_pvalues(m_pac, m_surro, p=p, mcp=mcp)
return self._pvalues
def _idcheck(self, idpac):
"""Check the idpac parameter."""
idpac = np.atleast_1d(idpac)
if (len(idpac) != 3) or not all([isinstance(k, (int, np.integer)) for k in idpac]):
raise ValueError("idpac must be a tuple/list of 3 integers.")
# Ozkurt PAC case (doesn't need surrogates and normalization)
if idpac[0] == 4:
idpac = np.array([4, 0, 0])
if (idpac[0] == 1) and (idpac[1] == 0) and (idpac[2] == 0):
logger.warning(
"MVL is amplitude dependent which means that if the amplitude "
"increases, MVL also increases. You should select a "
"normalization method for correcting this limitation "
"(e.g idpac=(1, 2, 4))")
if (idpac[2] != 0) and (idpac[1] == 0):
logger.warning("If you want to normalize the estimated PAC, you "
"should select a surrogate method (second digit of "
"`idpac`). Normalization ignored.")
idpac[2] = 0
self._idpac = idpac
# string representation
self.method, self.str_surro, self.str_norm = pacstr(idpac)
@property
def idpac(self):
"""Get the idpac value."""
return self._idpac
@idpac.setter
def idpac(self, value):
"""Set idpac value."""
self._idcheck(value)
@property
def pac(self):
"""Array of un-normalized PAC of shape (n_amp, n_pha, n_epochs)."""
return self._pac
@property
def surrogates(self):
"""Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)."""
return self._surrogates
@property
def pvalues(self):
"""Array of p-values of shape (n_amp, n_pha)."""
return self._pvalues
class EventRelatedPac(_PacObj, _PacVisual):
"""Compute the Event Related Phase-Amplitude Coupling (ERPAC).
The traditional PAC approach is computed across time, which means that
changes of PAC across time cannot be observed. In contrast, the ERPAC is
computed across epochs (or trials), which preserves the time dimension.
Parameters
----------
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude.
width : int | 7
Width of the Morlet's wavelet.
"""
def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
cycle=(3, 6), width=7, verbose=None):
"""Check and initialize."""
set_log_level(verbose)
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
cycle=cycle, width=width)
_PacPlt.__init__(self)
logger.info("Event Related PAC object defined")
def fit(self, pha, amp, method='circular', smooth=None, n_jobs=-1,
n_perm=None, p=.05, mcp='fdr', verbose=None):
"""Compute the Event-Related Phase-Amplitude Coupling (ERPAC).
The ERPAC :cite:`voytek2013method` is used to measure PAC across trials
and is interesting for real-time estimation.
Parameters
----------
pha, amp : array_like
Respectively the phase of slower oscillations of shape
(n_pha, n_epochs, n_times) and the amplitude of faster
oscillations of shape (n_amp, n_epochs, n_times).
method : {'circular', 'gc'}
Name of the method for computing erpac. Use 'circular' for
reproducing :cite:`voytek2013method` or 'gc' for a Gaussian-Copula
based erpac :cite:`ince2017statistical`.
smooth : int | None
Half-width of the temporal smoothing window, in time points. Only
active with the Gaussian-Copula ('gc') method.
n_perm : int | None
Number of permutations to compute for assessing p-values for the
gaussian-copula ('gc') method. Statistics are performed by randomly
swapping phase trials
p : float | 0.05
Statistical threshold for the gaussian-copula ('gc') method
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. This is needed when
using the circular ERPAC (:cite:`voytek2013method`). Note that the
correction is performed using MNE-Python.
Returns
-------
erpac : array_like
The ERPAC estimation of shape (n_amp, n_pha, n_times)
"""
set_log_level(verbose)
pha, amp = self._phampcheck(pha, amp)
self.method = method
self._pvalues = None
# move the trial axis to the end (n_freqs, n_times, n_epochs)
pha, amp = np.moveaxis(pha, 1, -1), np.moveaxis(amp, 1, -1)
# method switch
if method == 'circular':
self.method = "ERPAC (Voytek et al. 2013)"
logger.info(f" Compute {self.method}")
self._erpac, self._pvalues = erpac(pha, amp)
self.infer_pvalues(p=p, mcp=mcp)
elif method == 'gc':
self.method = "Gaussian-Copula ERPAC"
logger.info(f" Compute {self.method}")
# copnorm phases and amplitudes then compute erpac
sco = copnorm(np.stack([np.sin(pha), np.cos(pha)], axis=-2))
amp = copnorm(amp)[..., np.newaxis, :]
self._erpac = ergcpac(sco, amp, smooth=smooth, n_jobs=n_jobs)
# compute permutations (if needed)
if isinstance(n_perm, int) and (n_perm > 0):
logger.info(f" Compute {n_perm} permutations")
self._surrogates = _ergcpac_perm(sco, amp, smooth=smooth,
n_jobs=n_jobs, n_perm=n_perm)
self.infer_pvalues(p=p, mcp=mcp)
return self.erpac
def filterfit(self, sf, x_pha, x_amp=None, method='circular', smooth=None,
n_perm=None, p=.05, mcp='fdr', edges=None, n_jobs=-1,
verbose=None):
"""Extract phases, amplitudes and compute ERPAC.
Parameters
----------
sf : float
The sampling frequency.
x_pha, x_amp : array_like
Array of data for computing ERPAC. x_pha is the data used for
extracting phases and x_amp, amplitudes. Both arrays must have
the same shapes (i.e. n_epochs, n_times). If you want to compute
local ERPAC i.e. on the same electrode, x=x_pha=x_amp. For distant
coupling, x_pha and x_amp can be different but must still have
the same shape.
method : {'circular', 'gc'}
Name of the method for computing erpac. Use 'circular' for
reproducing :cite:`voytek2013method` or 'gc' for a Gaussian-Copula
based erpac.
smooth : int | None
Half-width of the temporal smoothing window, in time points. Only
active with the Gaussian-Copula ('gc') method
:cite:`ince2017statistical`.
n_perm : int | None
Number of permutations to compute for assessing p-values for the
gaussian-copula ('gc') method. Statistics are performed by randomly
swapping phase trials
p : float | 0.05
Statistical threshold for the gaussian-copula ('gc') method
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. This is needed when
using the circular ERPAC (:cite:`voytek2013method`). Note that the
correction is performed using MNE-Python.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
Returns
-------
erpac : array_like
The ERPAC estimation of shape (n_amp, n_pha, n_times)
"""
x_amp = x_pha if not isinstance(x_amp, np.ndarray) else x_amp
# extract phases and amplitudes
logger.info(f" Extract phases (n_pha={len(self.xvec)}) and "
f"amplitudes (n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, edges=edges, n_jobs=1)
pha = self.filter(sf, x_pha, ftype='phase', **kw)
amp = self.filter(sf, x_amp, ftype='amplitude', **kw)
# compute erpac
return self.fit(pha, amp, method=method, smooth=smooth, n_jobs=n_jobs,
n_perm=n_perm, p=p, mcp=mcp, verbose=verbose)
def infer_pvalues(self, p=0.05, mcp='fdr'):
"""Infer p-values based on surrogate distribution.
Parameters
----------
p : float | 0.05
Statistical threshold
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. This is needed when
using the circular ERPAC (:cite:`voytek2013method`). Note that the
correction is performed using MNE-Python.
Returns
-------
pvalues : array_like
Array of p-values of shape (n_amp, n_pha, n_times)
"""
# ---------------------------------------------------------------------
# check that pac and surrogates have already been computed
assert hasattr(self, 'erpac'), ("You should compute ERPAC first. Use "
"the `fit` method")
assert mcp in ['fdr', 'bonferroni']
# correct the p-values for multiple comparisons (Voytek's only)
if "Voytek" in self.method:
logger.info(f" Correct p-values for multiple-comparisons using "
f"{mcp} correction of MNE-Python")
from mne.stats import fdr_correction, bonferroni_correction
fcn = fdr_correction if mcp == 'fdr' else bonferroni_correction
_, self._pvalues = fcn(self._pvalues, alpha=p)
else:
assert hasattr(self, 'surrogates'), "No surrogates computed"
# compute the p-values using maxstat (gcPAC)
self._pvalues = self._infer_pvalues(self.erpac, self.surrogates,
p=p)
return self._pvalues
@property
def erpac(self):
"""Array of event-related PAC of shape ()."""
return self._erpac
@property
def surrogates(self):
"""Array of surrogates of shape (n_perm, n_amp, n_pha, n_times)."""
return self._surrogates
@property
def pvalues(self):
"""Array of p-values of shape (n_amp, n_pha, n_times)."""
return self._pvalues
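# Illustrative sketch (hypothetical names): time-resolved coupling with the
# Gaussian-Copula estimator, with permutations for assessing p-values:
# >>> rp = EventRelatedPac(f_pha=[8, 12], f_amp=(30, 160, 30, 10))
# >>> erpac = rp.filterfit(sf, x, method='gc', smooth=20, n_perm=200)
# >>> erpac.shape  # (n_amp, n_pha, n_times)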
class PreferredPhase(_PacObj, _PolarPlt):
"""Compute the Preferred Phase (PP).
The preferred phase is defined as the phase at which the amplitude is
maximum.
Parameters
----------
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude.
width : int | 7
Width of the Morlet's wavelet.
"""
def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
cycle=(3, 6), width=7, verbose=None):
"""Check and initialize."""
set_log_level(verbose)
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
cycle=cycle, width=width)
_PacPlt.__init__(self)
logger.info("Preferred phase object defined")
self.method = 'Preferred-Phase (PP)'
def fit(self, pha, amp, n_bins=72):
"""Compute the preferred-phase.
Parameters
----------
pha, amp : array_like
Respectively the phase of slower oscillations of shape
(n_pha, n_epochs, n_times) and the amplitude of faster
oscillations of shape (n_amp, n_epochs, n_times).
n_bins : int | 72
Number of bins for binning the amplitude according to phase
slices.
Returns
-------
binned_amp : array_like
The binned amplitude according to the phase of shape
(n_bins, n_amp, n_pha, n_epochs)
pp : array_like
The preferred phase where the amplitude is maximum, of shape
(n_amp, n_pha, n_epochs)
polarvec : array_like
The phase vector for the polar plot of shape (n_bins,)
"""
# Check phase and amplitude shapes :
pha, amp = self._phampcheck(pha, amp)
return preferred_phase(pha, amp, n_bins=n_bins)
def filterfit(self, sf, x_pha, x_amp=None, edges=None, n_bins=12,
verbose=None):
"""Extract phases, amplitudes and compute the preferred phase (PP).
Parameters
----------
sf : float
The sampling frequency.
x_pha, x_amp : array_like
Array of data for computing PP. x_pha is the data used for
extracting phases and x_amp, amplitudes. Both arrays must have
the same shapes (i.e. n_epochs, n_times). If you want to compute
local PP i.e. on the same electrode, x=x_pha=x_amp. For distant
coupling, x_pha and x_amp can be different but must still have
the same shape.
n_bins : int | 12
Number of bins for binning the amplitude according to phase
slices.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
Returns
-------
binned_amp : array_like
The binned amplitude according to the phase of shape
(n_bins, n_amp, n_pha, n_epochs)
pp : array_like
The preferred phase where the amplitude is maximum, of shape
(n_amp, n_pha, n_epochs)
polarvec : array_like
The phase vector for the polar plot of shape (n_bins,)
"""
x_amp = x_pha if not isinstance(x_amp, np.ndarray) else x_amp
# extract phases and amplitudes
logger.info(f" Extract phases (n_pha={len(self.xvec)}) and "
f"amplitudes (n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, edges=edges, n_jobs=1)
pha = self.filter(sf, x_pha, ftype='phase', **kw)
amp = self.filter(sf, x_amp, ftype='amplitude', **kw)
# compute pp
return self.fit(pha, amp, n_bins=n_bins)
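# Illustrative sketch (hypothetical names): bin the amplitude by phase and get
# the preferred phase for each frequency pair and epoch:
# >>> pp_obj = PreferredPhase(f_pha=[5, 7], f_amp=(60, 200, 10, 5))
# >>> binned_amp, pp, polarvec = pp_obj.filterfit(sf, x, n_bins=72)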
| EtienneCmb/tensorpac | tensorpac/pac.py | Python | bsd-3-clause | 36,901 | ["Gaussian"] | fd958ef21ca7854714f77c7cd4934202b2bb9f8cfd4cf765907f8b4e893ebd82 |
import os, sys
import vtktools
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from paraview.simple import *
## READ archive (too many points... somehow)
label = sys.argv[1]
dayi = int(sys.argv[2])
dayf = int(sys.argv[3])
days = int(sys.argv[4])
path = '/tamay/mensa/fluidity/hycom_winter/'+label+'/'
reader = XMLPartitionedUnstructuredGridReader()
filter = CleantoGrid(reader)
writer = XMLPUnstructuredGridWriter(Input=filter)
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
Tmax = 21.0
Tmin = 19.8
for time in range(dayi,dayf,days):
tlabel = str(time)
while len(tlabel) < 3: tlabel = '0'+tlabel
file0 = 'mli_' + str(time) + '.pvtu'
filepath = path+file0
file1 = label+'_' + tlabel
fileout = path + file1
filetemp = './temp_'+label+'.pvtu'
print 'opening: ', filepath
reader.FileName = filepath
reader.PointArrayStatus = ['Temperature','Velocity']
filter = CleantoGrid(reader)
writer.FileName = filetemp
writer.Input = filter
writer.UpdatePipeline()
data = vtktools.vtu(filetemp)
#print 'fields: ', data.GetFieldNames()
coord = data.GetLocations()
T = data.GetScalarField('Temperature')
U = data.GetVectorField('Velocity')
#T = data.GetScalarField('T')
del data
id_T = np.asarray(np.where(coord[:,2] == 0)) # values from surface
x = coord[id_T,0].flatten()
y = coord[id_T,1].flatten()
Us = U[id_T,0].flatten()
Vs = U[id_T,1].flatten()
Ws = U[id_T,2].flatten()
Ts = T[id_T].flatten()
Ts[np.asarray(np.where(Ts > Tmax))] = Tmax
Ts[np.asarray(np.where(Ts < Tmin))] = Tmin
## PLOT
# mask quiver for legend
id_x = np.asarray(np.where(x > 21000))
id_y = np.asarray(np.where(y > 49000))
Us[np.intersect1d(id_x,id_y)] = float('NaN')
Vs[np.intersect1d(id_x,id_y)] = float('NaN')
plt.figure()
plt.gca().set_aspect('equal')
plt.autoscale(enable=True, axis='both', tight=True)
if time == 0:
plt.triplot(x,y,linewidth=0.5,color='gray')
v1 = np.linspace(Tmin, Tmax, 50, endpoint=True)
v2 = np.linspace(Tmin, Tmax, 11, endpoint=True)
plt.tricontourf(x,y,Ts,v1)
plt.colorbar(ticks=v2)
# q = plt.quiver(x,y,Us,Vs,units='width',scale=1/0.25)
# p = plt.quiverkey(q,25000,50000,1,"1 m/s",coordinates='data')
plt.ylabel('Longitude')
plt.xlabel('Latitude')
plt.xticks(range(0,35000,5000),(range(0,35,5)))
plt.yticks(range(0,60000,10000),(range(0,60,10)))
plt.title('T, 2Km, '+str(np.trunc(time*5/24)/10.0)+' day')
plt.savefig('./plot/'+label+'/'+file1+'.eps',bbox_inches='tight')
plt.close()
print 'saved '+'./plot/'+label+'/'+file1+'.eps\n'
# os.system('gs -dSAFER -dBATCH -dNOPAUSE -sDEVICE=png16m -dTextAlphaBits=4 -r300 -sOutputFile=./plot/'+label+'/'+file1+'.png ./plot/'+label+'/'+file1+'.eps')
os.system('gs -sDEVICE=jpeg -dJPEGQ=100 -dNOPAUSE -dBATCH -dSAFER -dTextAlphaBits=4 -r300 -sOutputFile=./plot/'+label+'/'+file1+'.jpg ./plot/'+label+'/'+file1+'.eps')
os.system('mogrify ./plot/'+label+'/'+file1+'.jpg -trim ./plot/'+label+'/'+file1+'.jpg')
# os.system('mogrify ./plot/'+label+'/'+file1+'.png -trim ./plot/'+label+'/'+file1+'.png')
print 'Making movie animation'
fr = '10'
br = '4096k'
crf = '24'
opts = '-y -f image2 -r '+fr+' -i ./plot/'+label+'/'+label+'_%03d.jpg -vcodec'
#ffmpeg_ogg = 'ffmpeg '+opts+' libtheora -b:v '+br+' ./plot/'+label+'/'+label+'.ogg'
ffmpeg_mp4 = 'ffmpeg '+opts+' libx264 -threads 0 -crf '+crf+' -s 1250x1620 ./plot/'+label+'/'+label+'.avi'
#print ffmpeg_ogg
#os.system(ffmpeg_ogg)
print ffmpeg_mp4
os.system(ffmpeg_mp4)
| jungla/ICOM-fluidity-toolbox | 2D/U/plot_T_hw.py | Python | gpl-2.0 | 3,539 | ["ParaView"] | 747f3c15895677dac99ac26894055942b1184c623fc050c16ec4e04dd21abad0 |
"""
@file create_hdf5.py
"""
import h5py
import numpy as np
import scipy.stats as ss
from os.path import join
def generate_data(path):
distributions = {'Gaussian': {'options': dict(loc=0, scale=0.1),
'name': 'norm'},
'Exponential': {'options': dict(loc=-0.5, scale=1),
'name': 'expon'},
'Chi Square': {'options': dict(loc=0.5, df=1),
'name': 'chi2'},
'Alpha': {'options': dict(a=3, loc=-0.5),
'name': 'alpha'},
'Beta': {'options': dict(a=3, b=2, loc=-0.5),
'name': 'beta'}
}
x = np.linspace(-1, 1, num=1000)
with h5py.File(join(path, 'demo_data.hdf5'), 'w') as f:
for group, vals in distributions.items():
grp = f.create_group(group)
grp.create_dataset("x", data=x)
grp.create_dataset("pdf", data=getattr(
ss, vals['name'])(**vals['options']).pdf(x))
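# Hedged read-back sketch (assumes the file written above exists under `path`,
# a hypothetical directory): each group exposes the "x" and "pdf" datasets.
# >>> with h5py.File(join(path, 'demo_data.hdf5'), 'r') as f:
# ...     x = f['Gaussian']['x'][:]
# ...     pdf = f['Gaussian']['pdf'][:]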
| ernestyalumni/MLgrabbag | visualization/bokehplus/app/simple_hdf5/create_hdf5.py | Python | mit | 1,069 | ["Gaussian"] | bb7b806d69c29ced9e8a9aa7309db5a783878fcf6dd068b72c3b4804e3bc9cb7 |
##############################################################
# Date: 20/01/16
# Name: plot_spacing.py
# Author: Alek Petty
# Description: Script to plot spacing between ATM points
# Input requirements: ATM and DMS for a given section
# Output: 1 km ATM spacing
import matplotlib
matplotlib.use("AGG")
import IB_functions as ro
import matplotlib.colors as colors
# basemap import
from mpl_toolkits.basemap import Basemap, shiftgrid
from skimage import measure
# Numpy import
import numpy as np
from pylab import *
from scipy.io import netcdf
import numpy.ma as ma
import string
from matplotlib.patches import Polygon
from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid.inset_locator import mark_inset
from mpl_toolkits.axes_grid.anchored_artists import AnchoredSizeBar
from scipy import stats
from scipy.interpolate import griddata
from matplotlib import rc
from netCDF4 import Dataset
from glob import glob
import os
from osgeo import osr, gdal
import mpl_toolkits.basemap.pyproj as pyproj
from scipy import ndimage
import matplotlib.tri as tri
import scipy.interpolate
import time
import h5py
from scipy.spatial import cKDTree as KDTree
rcParams['axes.labelsize'] = 9
rcParams['xtick.labelsize']=9
rcParams['ytick.labelsize']=9
rcParams['legend.fontsize']=9
rcParams['font.size']=9
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
#rc('font', family='sans-serif')
#rc('font', serif='Helvetica Neue')
#rc('text', usetex='false')
def plot_section_3():
sizex = np.amax(xatm_km) - np.amin(xatm_km)
sizey = np.amax(yatm_km) - np.amin(yatm_km)
ratio = sizey/sizex
#minvalL = 0
elevation2d_ridge_comp = ma.compressed(elevation2d_ridge_ma)
#maxvalL = np.round(np.percentile(elevation2d_ridge_comp, 99), decimals=1)
lowerp = 5
upperp = 99
minval = np.round(np.percentile(elevation_km, lowerp), decimals=1)
maxval = np.round(np.percentile(elevation_km, upperp), decimals=1)
minvalR = 1.0 #round(np.amin(ridge_height_mesh), 1)
maxvalR = 1.3 #round(np.amax(ridge_height_mesh), 1)
#dms_plot=0
textwidth=5.
fig = figure(figsize=(textwidth,textwidth*0.45*ratio))
ax1 = subplot(131)
ax1.annotate('(a) Raw ATM' , xy=(0.03, 1.03), textcoords='axes fraction', color='k', horizontalalignment='middle', verticalalignment='middle')
#dx_in_points = ax1.transData.transform(1, 1)
#print dx_in_points
#im1 = plt.Circle(xatm_km, yatm_km, 1, c=elevation_km, edgecolor='none')
im1 = scatter(xatm_km, yatm_km, c = elevation_km, vmin = minval, vmax = maxval, s=0.3, lw = 0, cmap = cm.RdYlBu_r, rasterized=True)
#patch = patches.PathPatch(poly_path, facecolor='none', lw=2)
#ax1.add_patch(patch)
ax1.set_aspect('equal')
ax2 = subplot(132)
ax2.annotate('(b) Gridded ('+str(xy_res)+' m) ATM' , xy=(0.03, 1.03), textcoords='axes fraction', color='k', horizontalalignment='middle', verticalalignment='middle')
elevation_rel = ma.masked_where((elevation2d-level_elev)<-0.1, (elevation2d-level_elev))
im2 = pcolormesh(xx2d, yy2d, elevation2d-level_elev, vmin=-(maxval-level_elev), vmax = maxval-level_elev, cmap = cm.RdYlBu_r)
#ax2.add_patch(patch)
ax2.set_aspect('equal')
ax3 = subplot(133)
ax3.annotate('(c) Unique ridges' , xy=(0.03, 1.03), textcoords='axes fraction', color='k', horizontalalignment='middle', verticalalignment='middle')
#ax3.add_patch(patch)
ax3.set_aspect('equal')
if (np.amax(label_im)>=1):
minvalLAB=0
maxvalLAB=np.amax(label_im)
label_im_ma=ma.masked_where(label_im<0.5, label_im)
im3 = pcolormesh(xx2d, yy2d, label_im_ma, vmin = minvalLAB, vmax = maxvalLAB, cmap = my_cmap)
#im3 = pcolormesh(xx2d, yy2d, ridge_height_mesh, vmin = minvalR, vmax = maxvalR, cmap = cm.YlOrRd)
im31 = plot(ridge_stats[:, 0], ridge_stats[:, 1], marker='o', markersize=1, linestyle = 'None', color='k')
#for i in xrange(ridge_stats.shape[0]):
# im32 = plot([ridge_stats[i, 0]-2*ridge_stats[i, 2], ridge_stats[i, 0]+2*ridge_stats[i, 2]], [ridge_stats[i, 1]-2*ridge_stats[i, 3], ridge_stats[i, 1]+2*ridge_stats[i, 3]], marker='None', linewidth=0.5, linestyle = '-', color='k')
axesname = ['ax1', 'ax2', 'ax3']
for plotnum in xrange(3):
vars()[axesname[plotnum]].set_xlim(np.amin(xatm_km),np.amax(xatm_km))
vars()[axesname[plotnum]].set_ylim(np.amin(yatm_km),np.amax(yatm_km))
vars()[axesname[plotnum]].xaxis.set_major_locator(MaxNLocator(5))
vars()[axesname[plotnum]].yaxis.grid(True)
vars()[axesname[plotnum]].xaxis.grid(True)
ax1.set_ylabel('False northing (m)', labelpad=1)
ax2.set_xlabel('False easting (m)', labelpad=1)
ax2.set_yticklabels([])
ax3.set_yticklabels([])
#fig.text(0.3, 0.98, 'DMS Date: '+date+' DMS Time: '+dms_time)
cax = fig.add_axes([0.1, 0.13, 0.2, 0.03])
cbar = colorbar(im1,cax=cax, orientation='horizontal', extend='both', use_gridspec=True)
cbar.set_label('Elevation to WGS84 (m)', labelpad=2)
xticks1 = np.linspace(minval, maxval, 3)
cbar.set_ticks(xticks1)
cax2 = fig.add_axes([0.4, 0.13, 0.2, 0.03])
cbar2 = colorbar(im2,cax=cax2, orientation='horizontal', extend='both', use_gridspec=True)
cbar2.set_label('Elevation to level ice (m)', labelpad=2)
xticks2 = np.linspace(-(maxval-level_elev), maxval-level_elev, 3)
cbar2.set_ticks(xticks2)
if (np.amax(label_im)>=1):
cax3 = fig.add_axes([0.73, 0.13, 0.2, 0.03])
cbar3 = colorbar(im3,cax=cax3, orientation='horizontal', extend='both', use_gridspec=True)
cbar3.set_label('Sail number', labelpad=0)
xticks2 = np.linspace(minvalLAB, maxvalLAB, 2)
cbar3.set_ticks(xticks2)
cbar3.solids.set_rasterized(True)
cbar.solids.set_rasterized(True)
cbar3.solids.set_rasterized(True)
ax3.annotate('# sails:'+str(num_ridges)+'\nSail area:'+ridge_areaL+r' m$^{2}$', xy=(0.03, 0.02), textcoords='axes fraction', color='k', horizontalalignment='left', verticalalignment='bottom')
#plt.tight_layout()
print 'Saving figure...'
subplots_adjust(left=0.09, top = 0.98, right=0.98, bottom=0.23, wspace=0.05)
savefig(figpath+'/3PLOTS/3plot_'+str(year)+str(days)+str(atm_file)+'_'+str(section)+'_'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm'+nstr+'.png', dpi=300)
def plot_spacing():
sizex = np.amax(xatm_km) - np.amin(xatm_km)
sizey = np.amax(yatm_km) - np.amin(yatm_km)
ratio = sizey/sizex
minval = 0.
maxval = 4.
#dms_plot=0
textwidth = 3.
fig = figure(figsize=(textwidth,textwidth*ratio))
subplots_adjust(left=0.14, bottom=0.1, top=0.98)
ax1 = subplot(111)
im1 = scatter(xatm_km, yatm_km, c = spacing, vmin = minval, vmax = maxval, s=1.0, lw = 0, cmap = cm.YlOrRd, rasterized=True)
ax1.set_aspect('equal')
axesname = ['ax1']
ax1.set_xlim(np.amin(xatm_km),np.amax(xatm_km))
ax1.set_ylim(np.amin(yatm_km),np.amax(yatm_km))
ax1.xaxis.set_major_locator(MaxNLocator(5))
ax1.set_xlabel('False easting (m)', labelpad=1)
ax1.set_ylabel('False northing (m)', labelpad=1)
ax1.annotate('Altitude:'+mean_altSTR+'m\nVelocity:'+mean_velSTR+r' m s$^{-1}$' , xy=(0.5, 0.95), textcoords='axes fraction', color='k', horizontalalignment='left', verticalalignment='top')
ax1.annotate('Mean:'+meanSSTR+'m\np(99%):'+nearmaxSSTR+'m\nMax:'+maxSSTR+'m' , xy=(0.03, 0.3), textcoords='axes fraction', color='k', horizontalalignment='left', verticalalignment='bottom')
#fig.text(0.3, 0.98, 'DMS Date: '+date+' DMS Time: '+dms_time)
cax = fig.add_axes([0.23, 0.27, 0.3, 0.03])
cbar = colorbar(im1,cax=cax, orientation='horizontal', extend='max', use_gridspec=True)
cbar.set_label('Shot spacing (m)', labelpad=1)
xticks1 = np.linspace(minval, maxval, 3)
cbar.set_ticks(xticks1)
cbar.solids.set_rasterized(True)
savefig(figpath+'figureS5.png', dpi=1000)
def calc_bulk_stats(stats_found, num_pts_section):
if (stats_found==1):
ice_area = ma.count(elevation2d)*(xy_res**2)
ridge_area_all = ma.count(elevation2d_ridge_ma)*(xy_res**2)
mean_ridge_height_all = np.mean(elevation2d_ridge_ma) - level_elev
mean_ridge_heightL = np.mean(ridge_height_mesh)
ridge_areaL = ma.count(ridge_height_mesh)*(xy_res**2)
return [mean_x, mean_y, ice_area, num_ridges, ridge_area_all, ridge_areaL, mean_ridge_height_all, mean_ridge_heightL, mean_alt, mean_pitch, mean_roll, mean_vel, num_pts_section, stats_found]
elif (stats_found==0):
#a = ma.masked_all((0))
#masked_val = mean(a)
return [mean_x, mean_y, -999, 0,-999, -999, -999, -999, mean_alt, mean_pitch, mean_roll, mean_vel, num_pts_section, stats_found]
#-------------- ATM AND DMS PATHS------------------
datapath='./Data_output/'
rawdatapath = '../../../DATA/'
IBrawdatapath = rawdatapath+'/ICEBRIDGE/'
ATM_path = IBrawdatapath+'/ATM/ARCTIC/'
dms_path = IBrawdatapath+'/DMS/'
posAV_path =IBrawdatapath+'/POSAV/SEA_ICE/GR/'
figpath='./Figures/'
my_cmap=ro.perceptual_colormap("Linear_L", rawdatapath+'/OTHER/CMAPS/', reverse=1)
m=pyproj.Proj("+init=EPSG:3413")
calc_atm_stats=1
min_ridge_height = 0.2
pint=5
pwidth = 20
along_track_res=1000
pts_threshold=18000*(along_track_res/1000)
xy_res=2
num_points_req = 100/(xy_res**2)
year = 2011
days = 0
atm_file = 1
section=843
narrow=0
if (narrow==1):
nstr='N'
ATM_path = ATM_path+'/NARROW/'
else:
nstr=''
ATM_year = ATM_path+str(year)+'/'
atm_files_year = glob(ATM_year+'*/')
atm_path_date = atm_files_year[days]
print 'ATM day:', atm_path_date
if (narrow==1):
atm_files_in_day = glob(atm_path_date+'*.h5')
else:
atm_files_in_day = ro.get_atm_files(atm_path_date, year)
#load POS file
posAV = loadtxt(posAV_path+str(year)+'_GR_NASA/sbet_'+str(atm_path_date[-9:-1])+'.out.txt', skiprows=1)
#GET POSITION OF PLANE AND 1km MARKERS FROM POSAV
xp, yp, dist, km_idxs, km_utc_times = ro.get_pos_sections(posAV, m, along_track_res)
print 'ATM file:', atm_files_in_day[atm_file], str(atm_file)+'/'+str(size(atm_files_in_day))
if (narrow==1):
lonT, latT, elevationT, utc_timeT= ro.get_atmmerged(atm_files_in_day[atm_file], year, 1)
else:
lonT, latT, elevationT, utc_timeT= ro.get_atmqih5(atm_files_in_day[atm_file], year, 1)
#IF SIZE OF DATA IS LESS THAN SOME THRESHOLD THEN DONT BOTHER ANALYZING
xT, yT = m(lonT, latT)
#GET POSAV INDICES COINCIDING WITH START AND END OF ATM FILE. ADD PLUS/MINUS 1 FOR SOME LEEWAY.
start_i = np.abs(km_utc_times - utc_timeT[0]).argmin()
end_i = np.abs(km_utc_times - utc_timeT[-1]).argmin()
for section in xrange(section, section+1):
print section
mean_x, mean_y, mean_alt, mean_pitch, mean_roll, mean_vel = ro.posav_section_info(m, posAV[km_idxs[section]:km_idxs[section+1]] )
print 'Mean altitude:', mean_alt
print 'Mean pitch:', mean_pitch
print 'Mean roll:', mean_roll
if (abs(mean_alt-500)<200) & (abs(mean_pitch)<5) & (abs(mean_roll)<5):
poly_path, vertices, sides = ro.get_pos_poly(xp, yp, km_idxs[section], km_idxs[section+1])
xatm_km, yatm_km, elevation_km = ro.get_atm_poly(xT, yT, elevationT, km_utc_times, utc_timeT, poly_path, section)
if (size(xatm_km)>0):
lonDMS, latDMS = m(xatm_km[0], yatm_km[0], inverse=True)
lonDMS_str = '%.2f' %lonDMS
latDMS_str = '%.2f' %latDMS
print lonDMS_str, latDMS_str
xatm_km = xatm_km - np.amin(xatm_km)
yatm_km = yatm_km - np.amin(yatm_km)
spacing, minS, meanS, nearmaxS, maxS = ro.shot_spacing(xatm_km, yatm_km)
mean_altSTR=' %.0f' % mean_alt
mean_pitchSTR=' %.0f' % mean_pitch
mean_rollSTR=' %.0f' % mean_roll
mean_velSTR=' %.0f' % mean_vel
meanSSTR = ' %.2f' % meanS
nearmaxSSTR = ' %.2f' % nearmaxS
maxSSTR = ' %.2f' % maxS
plot_spacing()
| akpetty/ibtopo2016 | plot_spacing.py | Python | gpl-3.0 | 11,351 | ["NetCDF"] | 3e1f0eed131e8d09b9864005ddbe0f83f84b9df1c925971087cab4fdd174f585 |
# -*- coding: utf-8 -*-
"""Utilities for reformatting and analyzing spiking data"""
import numpy as np
from copy import deepcopy
from itertools import product
from scipy import signal
from scipy.stats.mstats import zscore
from scipy.signal import medfilt
from scipy.signal import resample
from scipy.signal import welch, gaussian
from scipy.stats import entropy as scientropy
def create_times(t, dt):
n_steps = int(t * (1.0 / dt))
times = np.linspace(0, t, n_steps)
return times
def create_psd(lfp, inrate, outrate=1024):
"""Calculate PSD from LFP/EEG data."""
lfp = np.array(lfp)
if inrate != outrate:
lfp = signal.resample(lfp, int(lfp.shape[0] * outrate / inrate))
# Calculate PSD
return signal.welch(
lfp,
fs=outrate,
window='hann',
nperseg=outrate,
noverlap=outrate / 2.0,
nfft=None,
detrend='linear',
return_onesided=True,
scaling='density')
def select_n(sel, ns, ts):
"""Select some neurons.
Params
------
sel : list
Which neurons to select
ts : array-like (1d)
Spike times
ns : array-like (1d)
Neurons
"""
if ns.shape != ts.shape:
raise ValueError("ns and ts must be the same shape")
m = np.zeros_like(ts, dtype=bool)
for n in sel:
m = np.logical_or(m, n == ns)
return ns[m], ts[m]
def to_spikes(ns, ts, T, N, dt):
"""Convert spike times to a grid and binary representation"""
if not np.allclose(T / dt, int(np.round(T / dt))):
raise ValueError("T is not evenly divsible by dt")
n_steps = int(T * (1.0 / dt))
times = np.linspace(0, T, n_steps)
spikes = np.zeros((n_steps, N))
for i, t in enumerate(ts):
n = ns[i]
idx = (np.abs(times - t)).argmin() # find closest
spikes[idx, n - 1] += 1
return spikes
def to_spiketimes(times, spikes):
"""Convert spikes to two 1d arrays"""
n_steps = len(times)
n = spikes.shape[1]
ns, ts = [], []
for i in range(n_steps):
for j in range(n):
if spikes[i, j] == 1:
ns.append(j) # save neuron and
ts.append(times[i]) # look up dt time
return np.array(ns), np.array(ts)
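# Illustrative round-trip sketch (hypothetical values): spike times onto a grid
# and back, assuming T=1.0 s, N=2 neurons and dt=1e-3 s.
# >>> ns, ts = np.array([1, 2]), np.array([0.010, 0.020])
# >>> spikes = to_spikes(ns, ts, 1.0, 2, 1e-3)
# >>> times = create_times(1.0, 1e-3)
# >>> ns2, ts2 = to_spiketimes(times, spikes)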
def to_spikedict(ns, ts):
"""Convert from seperate time and neuron
arrays to a neuron-keyed dict"""
d_sp = {}
for n, t in zip(ns, ts):
try:
d_sp[n].append(t)
except KeyError:
d_sp[n] = [
t,
]
for k in d_sp.keys():
d_sp[k] = np.array(d_sp[k])
return d_sp
def spikedict_to(d_sp):
""""Undoes `to_spikedict`"""
ts, ns = [], []
for n, ts_n in d_sp.items():
ns.extend([n] * len(ts_n))
ts.extend(list(ts_n))
return np.array(ns), np.array(ts)
def ts_sort(ns, ts):
"""Sort by ts"""
ts = np.array(ts)
ns = np.array(ns)
idx = ts.argsort()
return ns[idx], ts[idx]
def bin_times(ts, t_range, dt):
"""ts into a grid of dt sized bins"""
if len(t_range) != 2:
raise ValueError("t_range must contain two elements")
if t_range[0] > t_range[1]:
raise ValueError("t_range[0] must be less then t_range[1]")
n_sample = int((t_range[1] - t_range[0]) * (1.0 / dt))
bins = np.linspace(t_range[0], t_range[1], n_sample)
binned, _ = np.histogram(ts[1:], bins=bins)
return bins[1:], binned
def coincidence_code(ts, ns, tol):
"""Define a spike-time coincidence code
Params
------
ts : array-like (1d)
Spike times
ns : array-like (1d)
Neurons
tol : numeric
How close two spikes must be to be coincident
"""
# The encoded sequence
encoded = []
ts_e = []
for i, t in enumerate(ts):
# Find which neurons fired in coincidence
# and count them. The count is the code.
m = np.isclose(t, ts, atol=tol)
n_set = np.sum(m)
encoded.append(n_set)
ts_e.append(t)
return np.asarray(encoded), np.asarray(ts_e)
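# Worked sketch: with tol=1e-3, the two spikes at 10 ms are mutually coincident
# so both get a count of 2, while the isolated spike at 20 ms gets 1.
# >>> ts = np.array([0.010, 0.010, 0.020])
# >>> ns = np.array([0, 1, 2])
# >>> encoded, ts_e = coincidence_code(ts, ns, tol=1e-3)
# >>> encoded  # array([2, 2, 1])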
def spike_time_code(ts, scale=1000, decimals=3):
"""Spike time code
Note: ts MUST be in seconds
Params
------
ts : array-like (1d)
Spike times
decimals : int
Number of decimals `ts` is rounded to before scaling
scale : numeric
Amount to scale `ts` by before int conversion
"""
scaled = np.round(ts, decimals) * scale
encoded = scaled.astype(int)
return encoded
def spike_window_code(ts, ns, w=1e-2, decimals=3):
"""Define a spike-time window code
Params
------
ts : array-like (1d)
Spike times
ns : array-like (1d)
Neurons
w : numeric (seconds)
Window size
decimals : int
Number of significant digits
used to define spike-times
in a window around each t in
`ts`
"""
# The encoded sequences
decimals = int(decimals)
encoded = []
ts_e = []
# The encoding machinery
encoding = {}
master_code = 0
for t in ts:
# Find which neurons fired in coincidence
# and make them a set
m = np.isclose(t, ts, atol=w)
n_set = frozenset(np.round(ts[m], decimals))
# If this set isn't known yet use the
# master code to encode it
try:
encoding[n_set]
except KeyError:
encoding[n_set] = master_code
master_code += 1
# Finally do the encode
encoded.append(encoding[n_set])
return np.asarray(encoded), encoding
def rate_code(ts, t_range, dt, k=1):
"""Define a rate code
Params
------
ts : array-like
spike times
t_range : 2-tuple
(Min, Max) values of ts
dt : numeric
Window size which which to bin
k : numeric
The effective resolution of the rate code (Hz)
Notes
-----
While the absolute value/units of t, t_range
and dt do not matter, their values must be self
consistent.
"""
# resample, discretize and encode
# convert magnitudes to order of apperance
# e.g. [1, 2, 5, 1, 3, 4] becomes
# [1, 2, 3, 1, 4, 5]
# 1. bin times
t_bins, binned = bin_times(ts, t_range, dt)
# 2. norm rate range
max_r = binned.max()
n_d = int(np.ceil(max_r / k))
digitized = np.digitize(binned, np.linspace(0, max_r, n_d))
# 3. Encode by order of apperance
# Define encodes by order of appearance in digitized
# This encoding makes the order of rate changes matter,
# not the overall amplitude or the exact binning details
# of the rate
# The encoding machinery
master_code = 0
encoding = {}
# The encoded sequence
encoded = []
for d in digitized:
# Init if this is the first time seeing d
try:
encoding[d]
except KeyError:
encoding[d] = master_code
master_code += 1
encoded.append(encoding[d])
# return np.asarray(encoded), t_bins
return np.asarray(encoded), t_bins, encoding
def spike_triggered_average(ts, ns, trace, t_range, dt, srate):
"""Spike triggered average
Return the spike triggered average or trace, in a window
of width dt.
Params
------
ts : array
Spike times
trace : array
The data to average
t_range : 2-tuple
The (min, max) values to trace
dt : numeric
The window size
srate : numeric
The sampling rate of trace
"""
n_bins = int(np.ceil((2 * (dt * srate))))
bins = np.linspace(-dt, dt, n_bins)
n_steps = int(np.ceil(srate * t_range[1]))
times = np.linspace(t_range[0], t_range[1], n_steps)
sta = np.zeros(n_bins)
# Sanity: check for empty times or ns
# and return 0s in the sta if needed
if (ts.size == 0) or (ns.size == 0):
return sta, bins
for t, n in zip(ts, ns):
# Prevent over/underflow
if t < dt:
continue
if t > (t_range[1] - dt):
continue
# Define the window and sum it
m = np.logical_and(times >= (t - dt), times <= (t + dt))
sta += trace[n, m] # Avg over neurons at each t
sta /= ts.size # divide the sum by n -> the mean.
return sta, bins
def estimate_communication(times,
ns,
ts,
window,
coincidence_t=1e-3,
coincidence_n=20,
return_all=False,
time_step=1e-4):
# Define overall analysis window
t0 = window[0]
tn = window[1]
if tn + coincidence_t > times.max():
raise ValueError("Final window must be less than max value in times")
m = np.logical_and(t0 <= ts, ts <= tn)
ts = ts[m]
ns = ns[m]
# Calculate C for every possible coincidence (CC) window, for all time
Cs = []
for t in times:
# Get CC window
cc0 = t
ccn = t + coincidence_t
m = np.logical_and(cc0 <= ts, ts <= ccn)
# Count spikes in the window
C_t = 0
if ts[m].size > 0:
n_spikes = ts[m].size
C_t = max(n_spikes - coincidence_n, 0) / coincidence_n
Cs.append(C_t)
# Find the max C across coincidence windows
C = np.max(Cs)
out = C
if return_all:
out = (C, Cs)
return out
def entopy(X):
"""Entropy for a list of symbols, X."""
if X.ndim != 1:
raise ValueError("X must be 1d.")
# Init
X = np.asarray(X)
# Est P(.) for each symbol in X
probs = []
for c1 in set(X):
probs.append(np.mean(c1 == X))
probs = np.asarray(probs)
probs = probs[probs > 0]
return -np.sum(probs * np.log2(probs))
def cond_entropy(X, Y):
"""Conditional entropy for lists of symbols, X and Y."""
if X.ndim != 1:
raise ValueError("X must be 1d.")
if Y.ndim != 1:
raise ValueError("Y must be 1d.")
# Init
X = np.asarray(X)
Y = np.asarray(Y)
# Est P(.) for each symbol in X
probs = []
for c1 in set(X):
for c2 in set(Y):
probs.append(np.mean(np.logical_and(X == c1, Y == c2)))
probs = np.asarray(probs)
if np.isnan(probs).sum() > 0:
print(probs)
raise ValueError("p est if very off")
probs = probs[probs > 0]
return -np.sum(probs * np.log2(probs))
def mi(X, Y):
"""Discrete mutual information (no bias correction)
Note: Only supports 1d inputs, and integer values.
"""
if (not np.issubdtype(X.dtype, np.integer)) or (not np.issubdtype(
Y.dtype, np.integer)):
raise ValueError("Inputs should have an integer dtype")
if (len(X.shape) > 1) or (len(Y.shape) > 1):
raise ValueError("Only 1D inputs supported")
return entopy(X) + entopy(Y) - cond_entropy(X, Y)
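# Worked sketch: two identical binary sequences share 1 bit of information,
# since H(X) = H(Y) = H(X, Y) = 1 bit here.
# >>> X = np.array([0, 0, 1, 1])
# >>> mi(X, X)  # 1.0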
def kl_divergence(a, b):
"""Calculate the K-L divergence between a and b
Note: a and b must be two sequences of integers
"""
a = np.asarray(a)
b = np.asarray(b)
# Find the total set of symbols
a_set = set(a)
b_set = set(b)
ab_set = a_set.union(b_set)
# Create a lookup table for each symbol in p_a/p_b
lookup = {}
for i, x in enumerate(ab_set):
lookup[x] = i
# Calculate event probabilities for a and then b
# To prevent nan/division errors every event
# gets at least a 1 count.
p_a = np.ones(len(ab_set))
for x in a:
p_a[lookup[x]] += 1
p_b = np.ones(len(ab_set))
for x in b:
p_b[lookup[x]] += 1
# Norm counts into probabilities
p_a /= a.size
p_b /= b.size
return scientropy(p_a, p_b, base=2)
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b.
Note: a and b are two sequences
"""
a = list(a)
b = list(b)
n, m = len(a), len(b)
# Make sure n <= m, to use O(min(n,m)) space
if n > m:
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[-1]
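# Worked sketch: the classic example needs two substitutions and one
# insertion, so the distance is 3.
# >>> levenshtein("kitten", "sitting")  # 3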
def kappa(ns1, ts1, ns2, ts2, t_range, dt):
"""Measure Bruno's Kappa correlation[0].
[0]: Wang, X.-J. & Buzsaki, G., 1996. Gamma Oscillation by Synaptic
Inhibition in a Hippocampal Interneuronal Network Model. J. Neurosci.,
16(20), pp.6402–6413.
"""
if len(ts1) == 0:
return 0.0
if len(ts2) == 0:
return 0.0
d_1 = to_spikedict(ns1, ts1)
d_2 = to_spikedict(ns2, ts2)
pairs = product(list(np.unique(ns1)), list(np.unique(ns2)))
corrs = []
for n1, n2 in pairs:
_, b1 = bin_times(d_1[n1], t_range, dt)
_, b2 = bin_times(d_2[n2], t_range, dt)
b1[b1 > 1] = 1
b2[b2 > 1] = 1
corrs.append(_kappa(b1, b2))
return np.nanmean(corrs)
def _kappa(bini, binj):
return np.sum(bini * binj) / np.sqrt(np.sum(bini) * np.sum(binj))
def fano(ns, ts):
"""Calculate isi Fano"""
d_sp = isi(ns, ts)
d_fano = {}
for n, v in d_sp.items():
d_fano[n] = v.std()**2 / v.mean()
return d_fano
def isi(ns, ts):
"""Return ISIs, in a neuron-keyed dict"""
d_sp = to_spikedict(ns, ts)
d_isi = {}
for k, v in d_sp.items():
tlast = 0
intervals = []
for t in v:
intervals.append(t - tlast)
tlast = deepcopy(t)
d_isi[k] = np.array(intervals)
return d_isi
def spiketimes_to_coincidences(ns, ts, tol):
"""Return the neurons and times of spikes that have at least one
coincident spike (within tol)."""
t_cc = []
n_cc = []
for i, t in enumerate(ts):
cc = np.isclose(t, ts, atol=tol)
if (cc.sum() - 1) > 0: # Will always match self
n_cc.append(ns[i])
t_cc.append(t)
return np.asarray(n_cc), np.asarray(t_cc)
def detect_coincidences(ns, ts, tol):
ccs = np.zeros_like(ts)
for i, t in enumerate(ts):
cc = np.isclose(t, ts, atol=tol)
ccs[i] += (cc.sum() - 1) # Will always match self
return ccs
def increase_coincidences(ns, ts, k, p, N, prng=None):
if prng is None:
prng = np.random.RandomState()
ts_cc = ts.copy()
moved = []
for i, t in enumerate(ts):
if i in moved:
continue
if p <= prng.rand():
# k_p = prng.randint(1, k + 1)
k_p = k
for j in range(1, k_p + 1):
try:
loc = i + j
ts_cc[loc] = t
moved.append(loc)
except IndexError:
pass
return ns, ts_cc
def precision(ns, ts, ns_ref, ts_ref, combine=True):
"""Analyze spike time precision (jitter)
Parameters
----------
ns : array-list (1d)
Neuron codes
ts : array-list (1d, seconds)
Spikes times
ns_ref : array-list (1d)
Neuron codes for the reference train
ts_ref : array-list (1d, seconds)
Spikes times for the reference train
"""
prec = []
ns_prec = []
# Join all ns, into the '0' key?
if combine:
ns = np.zeros_like(ns)
ns_ref = np.zeros_like(ns_ref)
# isolate units, and reformat
ref = to_spikedict(ns_ref, ts_ref)
target = to_spikedict(ns, ts)
# analyze precision
    for n, r in ref.items():
try:
x = target[n]
except KeyError:
x = np.zeros_like(r)
minl = min(len(r), len(x))
diffs = np.abs([r[i] - x[i] for i in range(minl)])
prec.append(np.mean(diffs))
ns_prec.append(n)
    # If we are combining, return scalars
# not sequences
if combine:
prec = prec[0]
ns_prec = ns_prec[0]
return ns_prec, prec
def seperation(ns_1, ts_1, ns_2, ts_2, dt, T=None):
"""Estimate the distance between two populations,
using the Gaussian smoothed PSTH.
Parameters
----------
ns_1 : array-list (1d)
Neuron codes
ts_1 : array-list (1d, seconds)
Spikes times
ns_2 : array-list (1d)
Neuron codes
ts_2 : array-list (1d, seconds)
Spikes times
dt : numeric
Sampling resolution
"""
# calculate the PSTH
if T is None:
T = max(ts_1.max(), ts_2.max())
N = max(ns_1.max(), ns_2.max())
psth1 = to_spikes(ns_1, ts_1, T, N, dt).sum(1)
psth2 = to_spikes(ns_2, ts_2, T, N, dt).sum(1)
# convolve, sigma 5 ms
sigma = 5e-3
lwin = int((3 * sigma) / dt)
g = gaussian(lwin, sigma / dt)
    # calculate the separation
psth1 = np.convolve(psth1, g)[0:psth1.shape[0]]
psth2 = np.convolve(psth2, g)[0:psth2.shape[0]]
sd1 = np.std(psth1)
sd2 = np.std(psth2)
sd = np.sqrt((sd1**2) + (sd2**2))
return np.abs(psth1 - psth2) / sd, psth1, psth2
def dendritic_kernel(tau_rise, tau_decay, dt, gmax=1):
"""Simulate a PSP, with a double exponential."""
# 10 half lives is enough to capture the response...
    n_syn_samples = int((tau_decay * 10) / dt)  # np.linspace needs an int count
t0 = np.linspace(0, tau_decay * 10, n_syn_samples)
tpeak = tau_decay * tau_rise / (
tau_decay - tau_rise) * np.log(tau_decay / tau_rise)
normf = 1 / (-np.exp(-tpeak / tau_rise) + np.exp(-tpeak / tau_decay))
g = (-np.exp(-t0 / tau_rise) + np.exp(-t0 / tau_decay)) * normf
g *= gmax
return g
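# Editorial usage sketch (not part of the original module): the kernel above is
# the difference of exponentials exp(-t/tau_decay) - exp(-t/tau_rise), rescaled
# by normf so that its analytic peak (at tpeak) equals gmax. For example,
#
#   >>> g = dendritic_kernel(tau_rise=1e-3, tau_decay=5e-3, dt=1e-4, gmax=2.0)
#   >>> round(g.max(), 2)
#   2.0
#
# With a dt that is coarse relative to tau_rise the sampled maximum can fall
# somewhat below gmax, because the time grid may miss the exact peak.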
def dendritic_lfp(ns,
ts,
N,
T,
tau_rise=0.00009,
tau_decay=5e-3,
gmax=1,
dt=0.001,
norm=True):
"""Simulate LFP by convloving spikes with a double exponential
kernel
Parameters
----------
ns : array-list (1d)
Neuron codes (integers)
ts : array-list (1d, seconds)
Spikes times
N : numeric
Total number of neurons
T : number
Total length of firing
tau_rise : numeric (default: 0.00009)
The rise time of the synapse
    tau_decay : numeric (default: 0.005)
The decay time of the synapse
dt : numeric (default: 0.001)
Time resolution (in seconds)
gmax : numeric
PSP max height
Note: Assumes spikes is 1 or 2d, and *column
oriented*
"""
spikes = to_spikes(ns, ts, T, N, dt)
if spikes.ndim > 2:
raise ValueError("spikes must be 1 of 2d")
if tau_rise < 0:
raise ValueError("tau_rise must be > 0")
if tau_decay < 0:
raise ValueError("tau_decay must be > 0")
if dt < 0:
raise ValueError("dt must be > 0")
# Enforce col orientation if 1d
if spikes.ndim == 1:
spikes = spikes[:, np.newaxis]
# make LFP
g = dendritic_kernel(tau_rise, tau_decay, dt, gmax)
    spsum = spikes.astype(float).sum(1)
lfps = np.convolve(spsum, g)[0:spikes.shape[0]]
if norm:
lfps = zscore(lfps)
return lfps
def soma_lfp(ns, ts, N, T, tau=0.002, dt=.001, norm=True):
"""Simulate LFP (1d) bu convlution with an 'alpha' kernel.
Parameters
----------
ns : array-list (1d)
Neuron codes (integers)
ts : array-list (1d, seconds)
Spikes times
tau : numeric (default: 0.001)
The alpha estimate time constant
dt : numeric (default: 0.001, seconds)
Step time
"""
spikes = to_spikes(ns, ts, T, N, dt)
if spikes.ndim > 2:
raise ValueError("spikes must be 1 of 2d")
if tau < 0:
raise ValueError("tau must be > 0")
if dt < 0:
raise ValueError("dt must be > 0")
# Enforce col orientation if 1d
if spikes.ndim == 1:
spikes = spikes[:, np.newaxis]
# 10 x tau (10 half lives) should be enough to span the
# interesting parts of g, the alpha function we are
# using to convert broadband firing to LFP
# a technique we are borrowing from:
#
# http://www.ncbi.nlm.nih.gov/pubmed/20463210
#
# then abusing a bit (too much?).
#
# We want 10*tau but we have to resample to dt time first
    n_alpha_samples = int((tau * 10) / dt)  # np.linspace needs an int count
t0 = np.linspace(0, tau * 10, n_alpha_samples)
# Define the alpha (g notation borrow from BV's initial code)
gmax = 0.1
g = gmax * (t0 / tau) * np.exp(-(t0 - tau) / tau)
# make LFP
    spsum = spikes.astype(float).sum(1)
spsum /= spsum.max()
lfps = np.convolve(spsum, g)[0:spikes.shape[0]]
if norm:
lfps = zscore(lfps)
return lfps
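# Editorial usage sketch (not part of the original module): building a toy LFP
# from a small spike train. `ns`/`ts` follow the neuron-code / spike-time
# convention used throughout this module; `to_spikes` and `zscore` are defined
# or imported earlier in the file, so the exact output length depends on them.
#
#   ns = np.array([0, 1, 0, 1])
#   ts = np.array([0.010, 0.012, 0.200, 0.205])
#   lfp = soma_lfp(ns, ts, N=2, T=0.5, tau=0.002, dt=0.001)
#   # lfp is expected to be a z-scored 1d trace with roughly T / dt samples.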
|
voytekresearch/fakespikes
|
fakespikes/util.py
|
Python
|
mit
| 20,574
|
[
"Gaussian",
"NEURON"
] |
5de859abbaa36804d9f01480239d814c8d6373c92403ac7e04e9288a2a25cf1f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import vtk
import vtk.test.Testing
import math
month_labels = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
book = [5675, 5902, 6388, 5990, 5575, 7393, 9878, 8082, 6417, 5946, 5526, 5166]
new_popular = [701, 687, 736, 696, 750, 814, 923, 860, 786, 735, 680, 741]
periodical = [184, 176, 166, 131, 171, 191, 231, 166, 197, 162, 152, 143]
audiobook = [903, 1038, 987, 1073, 1144, 1203, 1173, 1196, 1213, 1076, 926, 874]
video = [1524, 1565, 1627, 1445, 1179, 1816, 2293, 1811, 1588, 1561, 1542, 1563]
class TestStackedPlot(vtk.test.Testing.vtkTest):
def testStackedPlot(self):
"Test if stacked plots can be built with python"
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0,1.0,1.0)
view.GetRenderWindow().SetSize(400,300)
chart = vtk.vtkChartXY()
view.GetScene().AddItem(chart)
# Create a table with some data in it
table = vtk.vtkTable()
arrMonthLabels = vtk.vtkStringArray()
arrMonthPositions = vtk.vtkDoubleArray()
arrMonth = vtk.vtkIntArray()
arrMonth.SetName("Month")
arrBooks = vtk.vtkIntArray()
arrBooks.SetName("Books")
arrNew = vtk.vtkIntArray()
arrNew.SetName("New / Popular")
arrPeriodical = vtk.vtkIntArray()
arrPeriodical.SetName("Periodical")
arrAudiobook = vtk.vtkIntArray()
arrAudiobook.SetName("Audiobook")
arrVideo = vtk.vtkIntArray()
arrVideo.SetName("Video")
numMonths = 12
for i in range(0,numMonths):
arrMonthLabels.InsertNextValue(month_labels[i])
arrMonthPositions.InsertNextValue(float(i))
arrMonth.InsertNextValue(i)
arrBooks.InsertNextValue(book[i])
arrNew.InsertNextValue(new_popular[i])
arrPeriodical.InsertNextValue(periodical[i])
arrAudiobook.InsertNextValue(audiobook[i])
arrVideo.InsertNextValue(video[i])
table.AddColumn(arrMonth)
table.AddColumn(arrBooks)
table.AddColumn(arrNew)
table.AddColumn(arrPeriodical)
table.AddColumn(arrAudiobook)
table.AddColumn(arrVideo)
# Set up the X Labels
chart.GetAxis(1).SetCustomTickPositions(arrMonthPositions, arrMonthLabels)
chart.GetAxis(1).SetMaximum(11)
chart.GetAxis(1).SetBehavior(vtk.vtkAxis.FIXED)
chart.SetShowLegend(True)
# Create the stacked plot
stack = chart.AddPlot(3)
stack.SetUseIndexForXSeries(True)
stack.SetInputData(table)
stack.SetInputArray(1,"Books")
stack.SetInputArray(2,"New / Popular")
stack.SetInputArray(3,"Periodical")
stack.SetInputArray(4,"Audiobook")
stack.SetInputArray(5,"Video")
# Set up a nice color series
colorSeries = vtk.vtkColorSeries()
colorSeries.SetColorScheme(2)
stack.SetColorSeries(colorSeries)
view.GetRenderWindow().SetMultiSamples(0)
view.GetRenderWindow().Render()
img_file = "TestStackedPlot.png"
vtk.test.Testing.compareImage(view.GetRenderWindow(),
vtk.test.Testing.getAbsImagePath(img_file),
threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestStackedPlot, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Charts/Core/Testing/Python/TestStackedPlot.py
|
Python
|
gpl-3.0
| 3,585
|
[
"VTK"
] |
f5bb9187577298409b118f370a050e5d039990bac09097b16b094aeb5b3487d3
|
"""
This example checks plotting functions for binary sources. It is
derived from example_11_binary_source.py
"""
import MulensModel as mm
import matplotlib.pyplot as plt
import numpy as np
# First, prepare the data. There is nothing very exciting in this part,
# so you may skip it.
t_0_1 = 6100.
u_0_1 = 0.2
t_0_2 = 6140.
u_0_2 = 0.01
t_E = 25.
model_1 = mm.Model({'t_0': t_0_1, 'u_0': u_0_1, 't_E': t_E})
model_2 = mm.Model({'t_0': t_0_2, 'u_0': u_0_2, 't_E': t_E})
def generate_time_vector(n_a, n_b):
"""Generate sorted array simulating survey + follow-up time vector."""
time_a = np.linspace(6000., 6300., n_a)
time_b = np.linspace(6139., 6141., n_b)
time = np.sort(np.concatenate((time_a, time_b)))
return time
def generate_dataset(time, flux_1, flux_2, blend_flux, flux_err,
model_1, model_2):
"""Generate simulated dataset assuming binary source model."""
A_1 = model_1.get_magnification(time)
A_2 = model_2.get_magnification(time)
flux = A_1 * flux_1 + A_2 * flux_2 + blend_flux
err_flux = flux_err + 0. * time
flux += flux_err * np.random.normal(size=len(time))
my_dataset = mm.MulensData([time, flux, err_flux], phot_fmt='flux')
return my_dataset
assumed_flux_1 = 100.
assumed_flux_2 = 5.
assumed_flux_blend = 10.
n_a = 1000
n_b = 600
flux_err = 6.
time = generate_time_vector(n_a, n_b)
my_dataset = generate_dataset(
time, assumed_flux_1, assumed_flux_2, assumed_flux_blend, flux_err,
model_1, model_2)
time_2 = generate_time_vector(int(n_a / 5), int(n_b / 5))
my_dataset_2 = generate_dataset(
time_2, assumed_flux_1/2., assumed_flux_2/2., assumed_flux_blend/2.,
2.*flux_err, model_1, model_2)
# Model
params = {'t_0_1': t_0_1, 'u_0_1': u_0_1, 't_0_2': t_0_2, 'u_0_2': u_0_2,
't_E': t_E}
my_model = mm.Model(params)
my_event = mm.Event(datasets=[my_dataset, my_dataset_2], model=my_model)
# Plot just the data
plt.figure()
plt.title('Raw Data')
my_dataset.plot(phot_fmt='mag')
my_dataset_2.plot(phot_fmt='mag')
(source_flux, blend_flux) = my_event.get_ref_fluxes()
# Plot just the model:
# Plot the model in "effective" magnification
plt.figure(figsize=(6, 8))
plt.subplot(3, 1, 1)
plt.title('Model Magnification')
my_model.plot_magnification(source_flux_ratio=assumed_flux_2 / assumed_flux_1)
# Plot the model in magnitudes
# - specifying f_source
plt.subplot(3, 1, 2)
plt.title('Model Lightcurve with Specified Fluxes')
my_model.plot_lc(source_flux=source_flux, blend_flux=blend_flux)
# - specifying q_flux (using assumed values, so should be different from prev,
# which uses fitted fluxes)
plt.subplot(3, 1, 3)
plt.title('Model Lightcurve with q_flux')
my_model.plot_lc(
source_flux=assumed_flux_1, blend_flux=assumed_flux_blend,
source_flux_ratio=assumed_flux_2 / assumed_flux_1)
plt.tight_layout()
# Plot the model and data from Event()
plt.figure()
plt.title('Event() Model + Data')
my_event.plot_model(zorder=10, color='black')
my_event.plot_data()
plt.show()
|
rpoleski/MulensModel
|
examples/example_17_1L2S_plotting.py
|
Python
|
mit
| 3,004
|
[
"exciting"
] |
ec343bb40fea92ea38e8c2bc5e67a0af415ecfdc3aece64827be3fb3544c9240
|
# -*- coding: utf-8 -*-
"""Fragments.
The addition of a fragment results in an entry called :data:`pybel.constants.VARIANTS`
in the data dictionary associated with a given node. This entry is a list with dictionaries
describing each of the variants. All variants have the entry :data:`pybel.constants.KIND` to identify whether it is
a PTM, gene modification, fragment, or HGVS variant. The :data:`pybel.constants.KIND` value for a fragment is
:data:`pybel.constants.FRAGMENT`.
Each fragment contains an identifier, which is a dictionary with the namespace and name, and can optionally include
the position ('pos') and/or amino acid code ('code').
For example, the node :code:`p(HGNC:GSK3B, frag(45_129))` is represented with the following:
.. code-block:: python
from pybel.constants import *
{
FUNCTION: PROTEIN,
NAMESPACE: 'HGNC',
NAME: 'GSK3B',
VARIANTS: [
{
KIND: FRAGMENT,
FRAGMENT_START: 45,
FRAGMENT_STOP: 129,
},
],
}
Additionally, nodes can have an asterisk (*) or question mark (?) representing unbound
or unknown fragments, respectively.
A fragment may also be unknown, such as in the node :code:`p(HGNC:GSK3B, frag(?))`. This
is represented with the key :data:`pybel.constants.FRAGMENT_MISSING` and the value of '?' like:
.. code-block:: python
from pybel.constants import *
{
FUNCTION: PROTEIN,
NAMESPACE: 'HGNC',
NAME: 'GSK3B',
VARIANTS: [
{
KIND: FRAGMENT,
FRAGMENT_MISSING: '?',
},
],
}
.. seealso::
- BEL 2.0 specification on `proteolytic fragments (2.2.3) <http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#_proteolytic_fragments>`_
- PyBEL module :py:class:`pybel.parser.modifiers.get_fragment_language`
"""
from pyparsing import And, Keyword, Optional, ParserElement, Suppress
from pyparsing import pyparsing_common as ppc
from ..utils import WCW, nest, one_of_tags, quote
from ...constants import (
FRAGMENT,
FRAGMENT_DESCRIPTION,
FRAGMENT_MISSING,
FRAGMENT_START,
FRAGMENT_STOP,
KIND,
)
__all__ = [
"get_fragment_language",
]
fragment_tag = one_of_tags(tags=["frag", "fragment"], canonical_tag=FRAGMENT, name=KIND)
fragment_range = (ppc.integer | "?")(FRAGMENT_START) + "_" + (ppc.integer | "?" | "*")(FRAGMENT_STOP)
missing_fragment = Keyword("?")(FRAGMENT_MISSING)
def get_fragment_language() -> ParserElement:
"""Build a protein fragment parser."""
_fragment_value_inner = fragment_range | missing_fragment(FRAGMENT_MISSING)
_fragment_value = _fragment_value_inner | And([Suppress('"'), _fragment_value_inner, Suppress('"')])
parser_element = fragment_tag + nest(_fragment_value + Optional(WCW + quote(FRAGMENT_DESCRIPTION)))
return parser_element
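# Editorial usage sketch (not part of the original module): parsing a BEL
# fragment expression with the grammar built above, via pyparsing's standard
# ``parseString`` entry point. The result keys come from the constants imported
# at the top of this module (KIND, FRAGMENT_START, FRAGMENT_STOP, ...).
#
#   parser = get_fragment_language()
#   result = parser.parseString("frag(45_129)")
#   # result.asDict() is expected to carry the KIND marker for a fragment plus
#   # FRAGMENT_START=45 and FRAGMENT_STOP=129 for this input.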
|
pybel/pybel
|
src/pybel/parser/modifiers/fragment.py
|
Python
|
mit
| 2,907
|
[
"Pybel"
] |
6441bba651cc0c4a674f2f9e3aa8b65905287cb658714da928329319c52f77ad
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('bb8.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# BB8 control
url(r'^control/', include('bb8.control.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
Jurevic/BB-8_droid
|
config/urls.py
|
Python
|
mit
| 1,618
|
[
"VisIt"
] |
49cba0b2a92af4431271db1b4c3d26476b5bc457693e0735499c70069d5b64c2
|
# -*- Python -*-
#
# @file test_2D_2D_diag.py
# @brief MultivariateRandomMixture validation tests
#
# Copyright (C) 2013 EADS IW France
#
# Author(s) : Denis Barbier, IMACS
# Sofiane Haddad, IMACS
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests of MultivariateRandomMixture
===================================
Test 2: R^2-->R^2 case - Diagonal matrix test for
the validation of the PythonMultivariateRandomMixture class
"""
if __name__ == "__main__":
import openturns as ot
import MultivariateRandomMixture as MV
import numpy as np
from MultivariateGaussianCharacteristicFunction import MGCF as mgcf
N = 200000
"""
Test
------
The matrix is assumed to be diagonal for validation purposes
"""
collection = ot.DistributionCollection([ot.Normal(0.0, 1.0), ot.Normal(0.0, 1.0)])
matrix = ot.Matrix([[4, 0], [0, 1.4]])
distribution = MV.PythonMultivariateRandomMixture(collection, matrix)
interval = distribution.getRange()
mean = distribution.getMean()
cov = distribution.getCovariance()
sigma = distribution.getStandardDeviation()
sample = distribution.getSample(N)
print "range = ", interval
print "mean = ", mean
print "cov = ", cov
print "sigma = ", sigma
# Equivalent Normal2D distribution
normal2D = ot.Normal(distribution.getMean(), distribution.getCovariance())
print "Gaussian distribution"
print "range = ", normal2D.getRange()
print "mean = ", normal2D.getMean()
print "cov = ", normal2D.getCovariance()
print "sigma = ", normal2D.getStandardDeviation()
print "sample :"
print "min = %s\nmax = %s\nmean = %s, cov = %s" %(sample.getMin(),sample.getMax(), sample.computeMean(), sample.computeCovariance())
# evaluation of the characteristic function in [xmin,ymin]x[xmax,ymax]
xmin = -1.0
ymin = -1.0
xmax = 1.0
ymax = 1.0
dx = 0.2
dy = 0.2
# 2D grid
x = np.arange(xmin, xmax + dx, dx)
y = np.arange(ymin, ymax + dy, dy)
# Compute delta(cf)
delta = 0.0
for valuex in x:
for valuey in y:
c1 = distribution.computeCharacteristicFunction([valuex, valuey])
c2 = mgcf(normal2D, [valuex, valuey])
delta += abs((c1 - c2))**2
# Variation of characteristic function
delta /= (len(x) * len(y))
print "delta of characteristic function=%s" %(np.sqrt(delta))
|
sofianehaddad/MVRM
|
test/test_2N_2N_diag.py
|
Python
|
lgpl-3.0
| 3,094
|
[
"Gaussian"
] |
cd3928da012620abff538dfcc9129f1ed55e1a71e8337c522966d231fba28216
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.backend.python.util_rules.pex import PexInterpreterConstraints
from pants.backend.python.util_rules.pex_environment import PythonExecutable
from pants.core.util_rules.source_files import SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Sources
from pants.util.logging import LogLevel
from pants.util.memo import memoized_property
from pants.util.ordered_set import FrozenOrderedSet
_SCRIPT = """\
# -*- coding: utf-8 -*-
# NB: This must be compatible with Python 2.7 and 3.5+.
from __future__ import print_function, unicode_literals
from io import open
import ast
import os.path
import re
import sys
# This regex is used to infer imports from strings, e.g.
# `importlib.import_module("example.subdir.Foo")`.
STRING_IMPORT_REGEX = re.compile(r"^([a-z_][a-z_\\d]*\\.){2,}[a-zA-Z_]\\w*$")
class AstVisitor(ast.NodeVisitor):
def __init__(self, package_parts):
self._package_parts = package_parts
self.explicit_imports = set()
self.string_imports = set()
def maybe_add_string_import(self, s):
if STRING_IMPORT_REGEX.match(s):
self.string_imports.add(s)
def visit_Import(self, node):
for alias in node.names:
self.explicit_imports.add(alias.name)
def visit_ImportFrom(self, node):
if node.level:
# Relative import.
rel_module = node.module
abs_module = ".".join(
self._package_parts[0 : len(self._package_parts) - node.level + 1]
+ ([] if rel_module is None else [rel_module])
)
else:
abs_module = node.module
for alias in node.names:
self.explicit_imports.add("{}.{}".format(abs_module, alias.name))
def visit_Call(self, node):
# Handle __import__("string_literal"). This is commonly used in __init__.py files,
# to explicitly mark namespace packages. Note that we don't handle more complex
# uses, such as those that set `level`.
if (
isinstance(node.func, ast.Name)
and node.func.id == "__import__"
and len(node.args) == 1
):
if sys.version_info[0:2] < (3, 8) and isinstance(node.args[0], ast.Str):
arg_s = node.args[0].s
val = arg_s.decode("utf-8") if isinstance(arg_s, bytes) else arg_s
                self.explicit_imports.add(val)
return
elif isinstance(node.args[0], ast.Constant):
self.explicit_imports.add(str(node.args[0].value))
return
self.generic_visit(node)
# String handling changes a bit depending on Python version. We dynamically add the appropriate
# logic.
if sys.version_info[0:2] == (2,7):
def visit_Str(self, node):
val = node.s.decode("utf-8") if isinstance(node.s, bytes) else node.s
self.maybe_add_string_import(val)
setattr(AstVisitor, 'visit_Str', visit_Str)
elif sys.version_info[0:2] < (3, 8):
def visit_Str(self, node):
self.maybe_add_string_import(node.s)
setattr(AstVisitor, 'visit_Str', visit_Str)
else:
def visit_Constant(self, node):
if isinstance(node.value, str):
self.maybe_add_string_import(node.value)
setattr(AstVisitor, 'visit_Constant', visit_Constant)
def parse_file(filename):
with open(filename, "rb") as f:
content = f.read()
try:
return ast.parse(content, filename=filename)
except SyntaxError:
return None
if __name__ == "__main__":
explicit_imports = set()
string_imports = set()
for filename in sys.argv[1:]:
tree = parse_file(filename)
if not tree:
continue
package_parts = os.path.dirname(filename).split(os.path.sep)
visitor = AstVisitor(package_parts)
visitor.visit(tree)
explicit_imports.update(visitor.explicit_imports)
string_imports.update(visitor.string_imports)
print("\\n".join(sorted(explicit_imports)))
print("\\n--")
print("\\n".join(sorted(string_imports)))
"""
@dataclass(frozen=True)
class ParsedPythonImports:
"""All the discovered imports from a Python source file.
Explicit imports are imports from `import x` and `from module import x` statements. String
imports come from strings that look like module names, such as
`importlib.import_module("example.subdir.Foo")`.
"""
explicit_imports: FrozenOrderedSet[str]
string_imports: FrozenOrderedSet[str]
@memoized_property
def all_imports(self) -> FrozenOrderedSet[str]:
return FrozenOrderedSet(sorted([*self.explicit_imports, *self.string_imports]))
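# Editorial illustration (not part of the original file): for a source file
# containing `import os.path` plus `importlib.import_module("example.subdir.Foo")`,
# the rule below would be expected to produce roughly
#
#   ParsedPythonImports(
#       explicit_imports=FrozenOrderedSet(["os.path"]),
#       string_imports=FrozenOrderedSet(["example.subdir.Foo"]),
#   )
#
# String imports are only inferred for dotted names matching STRING_IMPORT_REGEX
# in the embedded script above.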
@dataclass(frozen=True)
class ParsePythonImportsRequest:
sources: Sources
interpreter_constraints: PexInterpreterConstraints
@rule
async def parse_python_imports(request: ParsePythonImportsRequest) -> ParsedPythonImports:
python_interpreter, script_digest, stripped_sources = await MultiGet(
Get(PythonExecutable, PexInterpreterConstraints, request.interpreter_constraints),
Get(Digest, CreateDigest([FileContent("__parse_python_imports.py", _SCRIPT.encode())])),
Get(StrippedSourceFiles, SourceFilesRequest([request.sources])),
)
input_digest = await Get(
Digest, MergeDigests([script_digest, stripped_sources.snapshot.digest])
)
process_result = await Get(
ProcessResult,
Process(
argv=[
python_interpreter.path,
"./__parse_python_imports.py",
*stripped_sources.snapshot.files,
],
input_digest=input_digest,
description=f"Determine Python imports for {request.sources.address}",
level=LogLevel.DEBUG,
),
)
explicit_imports, _, string_imports = process_result.stdout.decode().partition("--")
return ParsedPythonImports(
explicit_imports=FrozenOrderedSet(explicit_imports.strip().splitlines()),
string_imports=FrozenOrderedSet(string_imports.strip().splitlines()),
)
def rules():
return collect_rules()
|
jsirois/pants
|
src/python/pants/backend/python/dependency_inference/import_parser.py
|
Python
|
apache-2.0
| 6,496
|
[
"VisIt"
] |
b6ef8e84b62c5c142775ea0ca29903f2cf2dce9a40ef7518458f8014b93e3be1
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
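# Editorial usage sketch (not part of scikit-learn): the three shrinkage modes
# accepted by _cov on a toy data matrix.
#
#   import numpy as np
#   X = np.array([[0., 0.], [1., 1.], [2., 0.]])
#   _cov(X)          # empirical covariance (shrinkage=None)
#   _cov(X, 'auto')  # Ledoit-Wolf shrinkage on standardized, then rescaled, data
#   _cov(X, 0.3)     # fixed shrinkage: 0.7 * empirical + 0.3 * mu * identity,
#                    # with mu = trace(empirical) / n_features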
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
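        # Editorial note (not part of scikit-learn): the four in-place steps
        # above compute the logistic sigmoid 1 / (1 + exp(-decision)) for each
        # one-vs-rest decision value; the branches below then either handle the
        # binary case directly or renormalize across classes, as LibLinear does.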
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
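    # Editorial note (not part of scikit-learn): with Sigma_k = R_k diag(S_k) R_k.T
    # for class k, norm2 above is the Mahalanobis term
    # (x - mu_k).T Sigma_k^{-1} (x - mu_k) and u[k] = sum(log S_k) = log det Sigma_k,
    # so the returned value is the Gaussian log posterior up to an additive
    # constant: -0.5 * (Mahalanobis + log det Sigma_k) + log prior_k.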
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
|
waterponey/scikit-learn
|
sklearn/discriminant_analysis.py
|
Python
|
bsd-3-clause
| 28,643
|
[
"Gaussian"
] |
53941702885df991c2269ebb70da356c078280fc1fad380e721b50f347979c0a
|
import pytest, py
from _pytest.main import Session
class TestCollector:
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if py.std.sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1,fn2,fn3:
assert fn != 3
assert fn != modcol
assert fn != [1,2,3]
assert [1,2,3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol("""
class TestClass:
def test_foo():
pass
""")
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(
testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
""")
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("_darcs", 'test_notfound.py')
tmpdir.ensure("CVS", 'test_notfound.py')
tmpdir.ensure("{arch}", 'test_notfound.py')
tmpdir.ensure(".whatever", 'test_notfound.py')
tmpdir.ensure(".bzr", 'test_notfound.py')
tmpdir.ensure("normal", 'test_found.py')
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
def test_custom_norecursedirs(self, testdir):
testdir.makeini("""
[pytest]
norecursedirs = mydir xyz*
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini("""
[pytest]
testpaths = gui uts
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems('-v')
assert [x.name for x in items] == ['test_gui', 'test_uts']
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ('env', 'gui', 'uts'):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ['test_%s' % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ('env', 'gui', 'uts'):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ['test_%s' % dirname]
class TestCollectPluginHookRelay:
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_file(self, path, parent):
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == '.abc'
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback:
def test_collection_error(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
result = testdir.runpytest(p)
assert "__import__" not in result.stdout.str(), "too long traceback"
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*mport*not_exists*"
])
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*hello world*",
])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_make_collect_report(__multicall__):
rep = __multicall__.execute()
rep.headerlines += ["header1"]
return rep
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*header1*",
])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
""")
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return True
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
assert "1 passed" in result.stdout.str()
result = testdir.runpytest()
assert result.ret == 0
assert "1 passed" not in result.stdout.str()
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest("""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
""")
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest("""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
""")
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule*",
"*test_x*"
])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest("""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
""")
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest("""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
""")
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule1*",
"*MyModule2*",
"*test_x*"
])
class TestSession:
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
#rootid = rcol.nodeid
#root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
#assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
py.std.pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
("pytest_collectreport", "report.nodeid == ''")
])
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
normid = p.basename + "::TestClass::()::test_method"
for id in [p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest("""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
""" % p.basename)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
py.std.pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains([
("pytest_collectstart",
"collector.fspath == collector.session.fspath"),
("pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'"),
("pytest_collectstart",
"collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
#("pytest_collectreport",
# "report.fspath == %r" % str(rcol.fspath)),
])
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
py.std.pprint.pprint(hookrec.calls)
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport",
"report.nodeid.startswith('aaa/test_aaa.py')"),
])
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
py.std.pprint.pprint(hookrec.calls)
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
])
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
arg = p.basename + ("::TestClass::test_method")
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
class Test_getinitialnodes:
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'x.py'
assert col.parent.name == testdir.tmpdir.basename
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'x.py'
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems:
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile("""
def test_1():
pass
def test_2():
pass
""")
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile('''
def testone():
pass
class TestX:
def testmethod_one(self):
pass
class TestY(TestX):
pass
''')
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == 'testone'
assert items[1].name == 'testmethod_one'
assert items[2].name == 'testmethod_one'
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini("""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
""")
p = testdir.makepyfile('''
class MyTestSuite:
def x_test(self):
pass
class TestCase:
def test_y(self):
pass
''')
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ['MyTestSuite.x_test', 'TestCase.test_y']
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest("""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2:
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
""")
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*",
])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines([
"*1 passed*",
])
class TestNodekeywords:
def test_no_under(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
l = list(modcol.keywords)
assert modcol.name in l
for x in l:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile("""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
""")
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
| Akasurde/pytest | testing/test_collection.py | Python | mit | 23,112 | ["VisIt"] | 85a6769febef1356da22595cdb4845170dc114c3c657b5cefca013288f7c7ea8 |
from py.config.options import Boolean, Integer, String, StringList
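# Each configuration option below is declared by subclassing one of the
# imported option types (Boolean, Integer, String, StringList). The pattern,
# as used throughout this file, appears to be:
#   value  - the default value of the option
#   tag    - category labels (e.g. "build", "general", "network", "storage")
#   undef  - whether the option may be left undefined
#   descr  - human readable description
# Some entries additionally set quote or no_default. Illustrative sketch only
# (EXAMPLE_OPTION is not a real RTEMS option):
#
#   class EXAMPLE_OPTION(Boolean):
#       value = False
#       tag = ["general"]
#       undef = True
#       descr = "Example option declaration."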
class ALLOW_IRQ_NESTING(Boolean):
value = True
tag = ["build"]
undef = True
descr = "If set to !0, allow nested irq processing"
class ARM_CLK(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Arm clock in hz"
class BENCHMARK_IRQ_PROCESSING(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If set to !0, enable code to benchmark irq processing"
class BFIN_ON_SKYEYE(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, disable features which are not supported on skyeye.
"""
class BSP(StringList):
value = []
tag = ["general"]
undef = True
descr = "List of bsps to build, comma delimited."
class BSP_CONSOLE_BAUD(Integer):
value = 9600
tag = ["general"]
undef = True
descr = "The default console baud rate."
class BSP_CPU_CLOCK_SPEED(Integer):
value = 0
tag = ["general"]
undef = True
descr = "The cpu clock frequency."
class BSP_DATA_CACHE_ENABLED(Boolean):
value = True
tag = ["storage"]
undef = True
descr = "Enables the data cache, if defined to a value other than zero"
class BSP_DIRTY_MEMORY(Boolean):
value = False
tag = ["storage"]
undef = True
descr = """
If defined, then the bsp framework will put a non-zero pattern into the rtems
workspace and C program heap. This should assist in finding code that assumes
memory starts set to zero.
"""
class BSP_DISABLE_UBOOT_WORK_AREA_CONFIG(Integer):
value = 1
tag = ["general"]
undef = True
descr = "Disable u-boot work area configuration"
class BSP_GPIOPCR_INITMASK(String):
value = "0x330F0F77"
tag = ["general"]
undef = True
descr = """
Defines the bits modified in the mpc5200 gpiopcr register during init. Must
match the hardware requirements
"""
class BSP_GPIOPCR_INITVAL(String):
value = "0x01050444"
tag = ["general"]
undef = True
descr = """
Defines the bit values written in the mpc5200 gpiopcr register during init.
Must match the hardware requirements
"""
class BSP_HAS_RM52xx(Integer):
value = 1
tag = ["build"]
undef = True
descr = "This bsp has a rm52xx compatible cpu."
class BSP_HAS_SMP(Integer):
value = 1
tag = ["build"]
undef = True
descr = """
Always defined when on a pc386 to enable the pc386 support for determining
the cpu core number in an smp configuration.
"""
class BSP_HAS_TX49xx(Integer):
value = 1
tag = ["build"]
undef = True
descr = "This bsp has a rm52xx compatible cpu."
class BSP_HAS_USC320(Integer):
value = 1
tag = ["build"]
undef = True
descr = "This bsp has a v3 usc320 system controller chip."
class BSP_INSTRUCTION_CACHE_ENABLED(Boolean):
value = True
tag = ["storage"]
undef = True
descr = """
Enables the instruction cache, if defined to a value other than zero
"""
class BSP_INTERRUPT_STACK_AT_WORK_AREA_BEGIN(Integer):
value = 1
tag = ["general"]
undef = True
descr = "Indicate that the interrupt stack is at the work area begin"
class BSP_LEON3_SMP(Integer):
value = 1
tag = ["build"]
undef = True
descr = """
Always defined when on a leon3 to enable the leon3 support for determining
the cpu core number in an smp configuration.
"""
class BSP_PRESS_KEY_FOR_RESET(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, print a message and wait until pressed before resetting board when
application exits.
"""
class BSP_RESET_BOARD_AT_EXIT(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, reset the board when the application exits."
class BSP_SMALL_MEMORY(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable testsuite samples with high memory demands"
class BSP_START_RESET_VECTOR(String):
value = ""
tag = ["general"]
undef = True
descr = "Reset vector address for bsp start"
class BSP_UART_AVAIL_MASK(String):
value = "0x01"
tag = ["network"]
undef = True
descr = """
Bit mask to specify the uarts (pscs), which should be enabled on this board.
Must match the hardware requirements. Psc1 corresponds to the lsb
"""
class BSP_USE_NETWORK_FEC(Boolean):
value = False
tag = ["network"]
undef = True
descr = """
If defined, then the bsp will use the fast ethernet controller for 10/100mbit
networking and used as primary networking interface.
"""
class BSP_USE_NETWORK_SCC(Boolean):
value = False
tag = ["network"]
undef = True
descr = """
If defined, then the bsp will use the serial communications controller (scc1)
for 10mbit networking.
"""
class BSP_USE_UART2(Boolean):
value = False
tag = ["network"]
undef = True
descr = "If defined, enables uart2."
class BSP_USE_UART_INTERRUPTS(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Enable usage of interrupts for the uart modules"
class BSP_VIDEO_80x50(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, set the vga display to 80x50."
class CC(String):
value = ""
tag = ["build"]
undef = True
descr = "C compiler command"
class CCAS(String):
value = ""
tag = ["build"]
undef = True
descr = "Assembler compiler command (defaults to CC)"
class CCASFLAGS(String):
value = ""
tag = ["build"]
undef = True
descr = "Assembler compiler flags (defaults to cflags)"
class CCLK(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Cpu clock in hz"
class CD2401_INT_LEVEL(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Interrupt level for the cd2401 (when cd2401_io_mode == 1)."
class CD2401_IO_MODE(Integer):
value = 0
tag = ["build"]
undef = True
descr = "0 for polled I/O, 1 for interrupt-driven."
class CD2401_USE_TERMIOS(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable using termios based console."
class CFLAGS(StringList):
value = []
tag = ["build"]
undef = True
descr = "C compiler flags"
class CFLAGS_DEBUG(String):
value = ""
tag = ["build"]
undef = True
descr = "Debug compiler flags."
class CFLAGS_OPTIMISE(String):
value = ""
tag = ["build"]
undef = True
descr = "Compiler flags for optimisation"
class CLOCK_DRIVER_USE_8254(Integer):
value = 0
tag = ["general"]
undef = True
descr = """
If enabled, the clock driver will use the good old 8254 chip to report
microsecond-accuracy clock times. Enable it, if: - you have nanosecond
timing enabled (you do not have use_ticks_for_cpu_usage_statistics enabled)
- you do not have clock_driver_use_tsc enabled (use one, the other, or
neither) - you do not mind adding roughly 5 microseconds to each context
switch.
"""
class CLOCK_DRIVER_USE_8254CLOCK_DRIVER_USE_TSC(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If enabled, the clock driver will use the good old 8254 chip to report
microsecond-accuracy clock times. Enable it if: (1) you have nanosecond timing
enabled (you do not have use_ticks_for_cpu_usage_statistics enabled); (2) you
do not have clock_driver_use_tsc enabled (use one, the other, or neither); (3)
you do not mind adding roughly 5 microseconds to each context switch.
"""
class CLOCK_DRIVER_USE_FAST_IDLE(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
This sets a mode where the time runs as fast as possible when a clock isr
occurs while the idle thread is executing. This can significantly reduce
simulation times.
"""
class CLOCK_DRIVER_USE_TSC(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If enabled, the clock driver will use the tsc register available with pentium-
class cpus to report close to nanosecond-accuracy clock times. Enable it if:
(1) you have nanosecond timing enabled (you do not have
use_ticks_for_cpu_usage_statistics enabled); (2) you do not have
clock_driver_use_8254 enabled (use one, the other, or neither); (3) you have a
pentium which supports tsc (all intels, and probably all or most clones); (4)
you do not have a variable-speed cpu clock. Note that some motherboard bios will
automatically vary clock speed for thermal control. Note also, however, that
really new pentium-class chips from intel and amd will maintain a constant-
rate tsc regardless.
"""
class CONFIG_CFLAGS(StringList):
value = []
tag = ["build"]
undef = True
descr = "Default compiler flags for rtems-config"
class CONFIG_CONSOLE(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for console (uart 0)"
class CONFIG_FPSP(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined enables the motorola floating point support package (fpsp)
"""
class CONFIG_I2C_0(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for i2c 0"
class CONFIG_I2C_1(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for i2c 1"
class CONFIG_I2C_2(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for i2c 2"
class CONFIG_LDFLAGS(StringList):
value = []
tag = ["build"]
undef = True
descr = "Default linker flags for rtems-config"
class CONFIG_LIBS(StringList):
value = []
tag = ["build"]
undef = True
descr = "= Default libraries for rtems-config"
class CONFIG_U3CLK(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Clock configuration for uart 3"
class CONFIG_U4CLK(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Clock configuration for uart 4"
class CONFIG_U5CLK(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Clock configuration for uart 5"
class CONFIG_U6CLK(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Clock configuration for uart 6"
class CONFIG_UART_1(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for uart 1"
class CONFIG_UART_2(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for uart 2"
class CONFIG_UART_3(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for uart 3"
class CONFIG_UART_CLKMODE(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Clock mode configuration for uarts"
class CONFIGURE_MALLOC_BSP_SUPPORTS_SBRK(Boolean):
value = True
tag = ["general"]
undef = True
descr = """
If defined then the bsp may reduce the available memory size initially. This
can be useful for debugging (reduce the core size) or dynamic loading (std gcc
text offsets/Jumps are < +/-32m). Note that the policy can still be defined by
the application (see sbrk.C, bsp_sbrk_policy). By undefining
configure_malloc_bsp_supports_sbrk this feature is removed and a little memory
is saved.
"""
class CONS_SCC1_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--scc1 uart if mode) must be defined if scc1 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not used
"""
class CONS_SCC2_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--scc2 uart if mode) must be defined if scc2 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not used
"""
class CONS_SCC3_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--scc3 uart if mode) must be defined if scc3 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not used
"""
class CONS_SCC4_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--scc4 uart if mode) must be defined if scc4 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not used
"""
class CONS_SMC1_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--smc1 uart if mode) must be defined if smc1 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not
used
"""
class CONS_SMC2_MODE(String):
value = "CONS_MODE_UNUSED"
tag = ["network"]
undef = True
descr = """
(Bsp--smc2 uart if mode) must be defined if smc2 is used as a tty (uart)
channel. Set it to cons_mode_polled for polled operation, cons_mode_irq for
interrupt driven (spooled) operation. Set it to cons_mode_unused, if not used
"""
class CONSOLE_BAUDRATE(Integer):
value = 9600
tag = ["general"]
undef = True
descr = "The baudrate of the console uart."
class CONSOLE_CHN(String):
value = "CONS_CHN_SMC1"
tag = ["general"]
undef = True
descr = """
(Bsp--console driver) must be defined to be one of cons_chn_smc1,
cons_chn_smc2, cons_chn_scc1, cons_chn_scc2, cons_chn_scc3, or cons_chn_scc4.
Determines which device will be registered as /Dev/Console.
"""
class CONSOLE_MINOR(String):
value = "SMC1_MINOR"
tag = ["general"]
undef = True
descr = """
Port to use for the rtems console: 0 - /Dev/Tty0, serial port 1/Console on the
mvme712m, 1 - /Dev/Tty1, serial port 2/Tty01 on the mvme712m, 2 - /Dev/Tty2,
serial port 3 on the mvme712m, 3 - /Dev/Tty3, serial port 4 on the mvme712m.
"""
class CONSOLE_MINOR_DUPLICATE(String):
value = "SMC2_MINOR"
tag = ["general"]
undef = True
descr = """
(Bsp--console driver) must be defined to be one of smc1_minor, smc2_minor,
scc2_minor, scc3_minor, or scc4_minor. Determines which device will be
registered as /Dev/Console.
"""
class CONSOLE_USE_INTERRUPTS(Boolean):
value = True
tag = ["general"]
undef = False
descr = """
The erc32 console driver can operate in either polled or interrupt mode. Under
the simulator (especially when fast_uart is defined), polled seems to operate
better. It is common for a task to print a line (like the end of test message)
and then exit. In this case, the program returns control to the simulator
command line before the program has even queued the output to the uart. Thus
sis has no chance of getting the data out.
"""
class CPU_CLOCK_RATE_HZ(Integer):
value = 20000000
tag = ["general"]
undef = True
descr = "Cpu clock rate in hz"
class DISABLE_MMU(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable mmu"
class DISABLE_READ_ONLY_PROTECTION(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable mmu protection of read-only sections"
class DISABLE_READ_WRITE_DATA_CACHE(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable cache for read-write data sections"
class DISPATCH_HANDLER_STAT(Boolean):
value = True
tag = ["build"]
undef = True
descr = "Used by irq/Irq.C"
class EMC_MICRON(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable micron configuration for emc"
class EMC_NUMONYX(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable numonyx configuration for emc"
class EMC_TEST(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable tests for emc"
class ENABLE(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Whether a bsp is enabled or disabled for use."
class ENABLE_DEBUG(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable debug build."
class ENABLE_FPSP(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Motorola floating point support package (fpsp)"
class ENABLE_LCD(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the sed1356 controller and LCD."
class ENABLE_MP(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable multiprocessing."
class ENABLE_MULTILIB(Boolean):
value = True
tag = ["general"]
undef = True
descr = "???"
class ENABLE_NETWORKING(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Enable tcp/Ip stack."
class ENABLE_NEWLIB(Boolean):
value = True
tag = ["general"]
undef = True
descr = "???"
class ENABLE_POSIX(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Enable posix."
class ENABLE_PTHREADS(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Enable pthreads, requires posix."
class ENABLE_SERDBG(Boolean):
value = False
tag = ["general"]
undef = True
descr = "???"
class ENABLE_SHELL(Boolean):
value = True
tag = ["general"]
undef = True
descr = "???"
class ENABLE_SIS_QUIRKS(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, then the sis simulator specific code in the bsp will be enabled.
In particular, sis requires special initialization not used on real erc32
"""
class ENABLE_SMP(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable smp, available for i386/Sparc only."
class ENABLE_UMON(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the umon console."
class ENABLE_UMON_CONSOLE(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the micromonitor console device."
class ENABLE_USART0(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the usart 0."
class ENABLE_USART1(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the usart 1."
class ENABLE_USART2(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the usart 2."
class ENABLE_USART3(Boolean):
value = False
tag = ["general"]
undef = True
descr = "If defined, enable use of the usart 3."
class ENABLE_WATCHDOG_RESET(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Bsp_reset() will use the watchdog to reset the chip"
class EPPCBUG_SMC1(Boolean):
value = True
tag = ["build"]
undef = True
descr = """
If defined, smc1 is in use by eppc-bug. The console driver will not re-
initialize that port.
"""
class EPPCBUG_VECTORS(Boolean):
value = True
tag = ["build"]
undef = True
descr = """
(Bsp--rtems) if defined, vectors branch to eppcbug, except the following:
0x500 (external interrupt), 0x900 (decrementer).
"""
class ETHERNET_RMII(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Enable rmii for ethernet"
class GEN83XX_ENABLE_INTERRUPT_NESTING(Boolean):
value = True
tag = ["build"]
undef = True
descr = "Enable interrupt nesting"
class HAS_DBUG(Integer):
value = 0
tag = ["general"]
undef = True
descr = """
If defined, we will not boot from reset, but from freescale dbug monitor.
"""
class HAS_LOW_LEVEL_INIT(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, we will do all the low level init of the chip (like
bus/Memory...).
"""
class HAS_PMC_PSC8(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Whether has a psc8 pmc board attached to pmc slot"
class HAS_SMC91111(Boolean):
value = False
tag = ["network"]
undef = True
descr = "If defined the board has the smc91111 networking chip."
class HAS_UBOOT(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable u-boot startup"
class HAVE_SHSIM_IOMEM_PATCH(Boolean):
value = True
tag = ["build"]
undef = True
descr = """
Whether support for functional iomem in shsim/Gdb shall be enabled
"""
class HCLK(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Ahb bus clock in hz"
class HEAP_EXTEND(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Enable heap extend by ethernet and usb regions"
class IDE_USE_PRIMARY_INTERFACE(Boolean):
value = True
tag = ["general"]
undef = True
descr = """
Determines, whether rtems will try to use the primary ide interface. Disable
it, if: 1, you have no primary ide interface. 2, you have no disk attached to
this interface or 3, you do not want to access disks attached to this
interface.
"""
class IDE_USE_SECONDARY_INTERFACE(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
Determines, whether rtems will try to use the secondary ide interface. Enable
it, if: 1, you have a secondary ide interface 2, you have at least one disk
attached to this interface 3, you do want to access disks attached to this
interface.
"""
class INITIALIZE_COM_PORTS(Boolean):
value = False
tag = ["general"]
undef = True
descr = "???"
class INTERRUPT_USE_TABLE(Boolean):
value = True
tag = ["build"]
undef = True
descr = "Select if interrupt use table or link list"
class LDFLAGS(StringList):
value = []
tag = ["build"]
undef = True
descr = """
Linker flags only, do not use this for directories or libraries
"""
class LIBS(StringList):
value = []
tag = ["build"]
undef = True
descr = "Libraries to pass to the linker, e.G. -L<library>"
class LINK_END(StringList):
value = []
tag = ["build"]
undef = True
descr = "Objects linked last"
class LINK_START(StringList):
value = []
tag = ["build"]
undef = True
descr = "Objects linked first"
class LINK_LINK(StringList):
value = ["-L${RTEMS} -T ${RTEMS}/linkcmds -dc -dp -N"]
tag = ["build"]
undef = True
descr = "Linker link flags"
class LINKCMDS(StringList):
value = []
tag = ["build"]
undef = True
descr = "Linker command files, first one is installed as linkcmds"
class LPC24XX_CCLK(String):
value = "72000000U"
tag = ["general"]
undef = True
descr = "Cpu clock in hz"
class LPC24XX_CONFIG_CONSOLE(Integer):
value = 0
tag = ["network"]
undef = True
descr = "Configuration for console (uart 0)"
class LPC24XX_CONFIG_I2C_0(Integer):
value = 0
tag = ["network"]
undef = True
descr = "Configuration for i2c 0"
class LPC24XX_CONFIG_I2C_1(Integer):
value = 1
tag = ["network"]
undef = True
descr = "Configuration for i2c 1"
class LPC24XX_CONFIG_I2C_2(String):
value = ""
tag = ["network"]
undef = True
descr = "Configuration for i2c 2"
class LPC24XX_CONFIG_UART_1(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Configuration for uart 1"
class LPC24XX_CONFIG_UART_2(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for uart 2"
class LPC24XX_CONFIG_UART_3(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Configuration for uart 3"
class LPC24XX_EMC_MICRON(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Enable micron configuration for emc"
class LPC24XX_EMC_NUMONYX(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Enable numonyx configuration for emc"
class LPC24XX_EMC_TEST(String):
value = ""
tag = ["general"]
undef = True
descr = "Enable tests for emc"
class LPC24XX_ETHERNET_RMII(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Enable rmii for ethernet"
class LPC24XX_HEAP_EXTEND(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Enable heap extend by ethernet and usb regions"
class LPC24XX_OSCILLATOR_MAIN(String):
value = "12000000U"
tag = ["general"]
undef = True
descr = "Main oscillator frequency in hz"
class LPC24XX_OSCILLATOR_RTC(String):
value = "32768U"
tag = ["general"]
undef = True
descr = "Rtc oscillator frequency in hz"
class LPC24XX_SPECIAL_TASK_STACKS_SUPPORT(Integer):
value = 1
tag = ["general"]
undef = True
descr = """
Enable special task stack support for task stacks in internal ram
"""
class LPC24XX_STOP_ETHERNET(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Stop ethernet controller at start-up to avoid dma interference"
class LPC24XX_STOP_GPDMA(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Stop general purpose dma at start-up to avoid dma interference"
class LPC24XX_STOP_USB(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Stop usb controller at start-up to avoid dma interference"
class LPC24XX_UART_BAUD(String):
value = "115200U"
tag = ["general"]
undef = True
descr = "Baud for uarts"
class LPC32XX_ARM_CLK(String):
value = "208000000U"
tag = ["general"]
undef = True
descr = "Arm clock in hz"
class LPC32XX_CONFIG_U3CLK(String):
value = ""
tag = ["network"]
undef = True
descr = "Clock configuration for uart 3"
class LPC32XX_CONFIG_U4CLK(String):
value = ""
tag = ["network"]
undef = True
descr = "Clock configuration for uart 4"
class LPC32XX_CONFIG_U5CLK(String):
value = "0x00001386U"
tag = ["network"]
undef = True
descr = "Clock configuration for uart 5"
class LPC32XX_CONFIG_U6CLK(String):
value = ""
tag = ["network"]
undef = True
descr = "Clock configuration for uart 6"
class LPC32XX_CONFIG_UART_CLKMODE(String):
value = "0x00000200U"
tag = ["network"]
undef = True
descr = "Clock mode configuration for uarts"
class LPC32XX_DISABLE_MMU(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable mmu"
class LPC32XX_DISABLE_READ_ONLY_PROTECTION(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable mmu protection of read-only sections"
class LPC32XX_DISABLE_READ_WRITE_DATA_CACHE(Boolean):
value = False
tag = ["storage"]
undef = True
descr = "Disable cache for read-write data sections"
class LPC32XX_ENABLE_WATCHDOG_RESET(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Enable watchdog reset"
class LPC32XX_ETHERNET_RMII(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Enable rmii for ethernet"
class LPC32XX_HCLK(String):
value = "104000000U"
tag = ["general"]
undef = True
descr = "Ahb bus clock in hz"
class LPC32XX_OSCILLATOR_MAIN(String):
value = "13000000U"
tag = ["general"]
undef = True
descr = "Main oscillator frequency in hz"
class LPC32XX_OSCILLATOR_RTC(String):
value = "32768U"
tag = ["general"]
undef = True
descr = "Rtc oscillator frequency in hz"
class LPC32XX_PERIPH_CLK(String):
value = "13000000U"
tag = ["general"]
undef = True
descr = "Peripheral clock in hz"
class LPC32XX_SCRATCH_AREA_SIZE(Integer):
value = 4096
tag = ["general"]
undef = True
descr = "Size of scratch area"
class LPC32XX_STOP_ETHERNET(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Stop ethernet controller at start-up to avoid dma interference"
class LPC32XX_STOP_GPDMA(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Stop general purpose dma at start-up to avoid dma interference"
class LPC32XX_STOP_USB(Boolean):
value = True
tag = ["general"]
undef = True
descr = "Stop usb controller at start-up to avoid dma interference"
class LPC32XX_UART_1_BAUD(String):
value = ""
tag = ["network"]
undef = True
descr = "Baud for uart 1"
class LPC32XX_UART_2_BAUD(String):
value = ""
tag = ["network"]
undef = True
descr = "Baud for uart 2"
class LPC32XX_UART_7_BAUD(String):
value = ""
tag = ["network"]
undef = True
descr = "Baud for uart 7"
class MPC5200_PSC_INDEX_FOR_GPS_MODULE(Integer):
value = 0
tag = ["build"]
undef = True
descr = "Psc index for gps module, if defined results in '/Dev/Gps'"
class MPC55XX_BOARD_GWLCFM(Boolean):
value = True
tag = ["build"]
undef = True
descr = "If defined, use custom settings for gwlcfm board"
class MPC55XX_BOARD_MPC5566EVB(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for mpc5566evb board"
class MPC55XX_BOARD_MPC5674FEVB(Boolean):
value = True
tag = ["build"]
undef = True
descr = "If defined, use custom settings for mpc5674fevb board"
class MPC55XX_BOARD_PHYCORE_MPC5554(Boolean):
value = True
tag = ["build"]
undef = True
descr = "If defined, use custom settings for phycore mpc5554 board"
class MPC55XX_BOOTFLAGS(String):
value = ""
tag = ["build"]
undef = True
descr = """
If defined, builds in bootflags above the rchw for setup in a debugger to
avoid startup mmu setup
"""
class MPC55XX_CHIP_TYPE(Integer):
value = 5554
tag = ["build"]
undef = True
descr = "Specifies the chip type in use (e.G. 5554 for mpc5554"
class MPC55XX_CLOCK_EMIOS_CHANNEL(String):
value = "MPC55XX_EMIOS_CHANNEL_NUMBER-1"
tag = ["build"]
undef = True
descr = """
Define to the emios channel to use for the bsp clock. The default is the last
channel.
"""
class MPC55XX_EMIOS_PRESCALER(Integer):
value = 1
tag = ["build"]
undef = True
descr = "Must be defined to set the emios prescaler"
class MPC55XX_ESCI_CONSOLE_MINOR(Integer):
value = 0
tag = ["general"]
undef = True
descr = """
Determines which esci device will be registered as /Dev/Console
"""
class MPC55XX_ESCI_USE_INTERRUPTS(Boolean):
value = True
tag = ["build"]
undef = True
descr = """
Define to zero or one to disable or enable interrupts for the esci devices
"""
class MPC55XX_FMPLL_CLK_OUT(Integer):
value = 128000000
tag = ["general"]
undef = True
descr = """
Must be defined to be the pll output clock (in hz) for clock generation
"""
class MPC55XX_FMPLL_MFD(Integer):
value = 12
tag = ["general"]
undef = True
descr = """
Must be defined to be the pll multiplication factor for clock generation
"""
class MPC55XX_FMPLL_PREDIV(Integer):
value = 1
tag = ["general"]
undef = True
descr = """
Must be defined to be the pll predivider factor for clock generation
"""
class MPC55XX_FMPLL_REF_CLOCK(Integer):
value = 8000000
tag = ["general"]
undef = True
descr = """
Must be defined to be the external reference clock (in hz) for clock
generation
"""
class NVRAM_CONFIGURE(Boolean):
value = True
tag = ["storage"]
undef = True
descr = """
Define to 1 if you want the console driver, network driver and caches
configured at boot time from parameters stored in nvram. If set to 1, most
parameters below are ignored during the build. If not set to 1, then the
console driver is configured at build time, the network host information is
obtained from application supplied data structures, and the caches are
configured at boot time based on the information supplied in this file.
"""
class ON_SIMULATOR(Boolean):
value = False
tag = ["build"]
undef = True
descr = """
If defined, this indicates the bsp is being built to run on the lm32 simulator
in gdb. This enables fast idle support which speeds up the clock ticks while
the idle task is running so time spent in the idle task is minimized. This
significantly reduces the wall time required to execute the rtems test suites.
It also enables a special exit and alternate printk support.
"""
class ON_SKYEYE(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, enable options which optimize executing on the skyeye simulator.
Speed up the clock ticks while the idle task is running so time spent in the
idle task is minimized. This significantly reduces the wall time required to
execute the rtems test suites.
"""
class OSCILLATOR_MAIN(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Main oscillator frequency in hz"
class OSCILLATOR_RTC(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Rtc oscillator frequency in hz"
class PATH_TOOLS(String):
value = ""
tag = ["general"]
undef = True
descr = "Location of rtems tools."
class PERIPH_CLK(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Peripheral clock in hz"
class PPC_USE_SPRG(Boolean):
value = True
tag = ["build"]
undef = True
descr = """
If defined, then the powerpc specific code in rtems will use some of the
special purpose registers to slightly optimize interrupt response time. The
use of these registers can conflict with other tools like debuggers.
"""
class PPC_VECTOR_FILE_BASE(String):
value = "0x0100"
tag = ["build"]
undef = True
descr = """
This defines the base address of the exception table. Note: vectors are
actually at 0xfff00000 but file starts at offset.
"""
class PREFIX(String):
value = ""
tag = ["general"]
undef = True
descr = "Install prefix."
class PRINTK_CHN(String):
value = "NOT_DEFINED_IN_BSP"
tag = ["general"]
undef = True
descr = """
(Bsp--console driver) must be defined to be one of cons_chn_smc1,
cons_chn_smc2, cons_chn_scc2, cons_chn_scc3, or cons_chn_scc4. Determines
which device is used for output y printk(). If the port that printk() uses is
also used for other I/O (e.G. If printk_chn == console_chn), then both ports
should use the same type of I/O, otherwise the drivers will likely conflict
with each other.
"""
class PRINTK_IO_MODE(Integer):
value = 0
tag = ["general"]
undef = True
descr = """
(Bsp--console driver) define to 0 or 1 if you want polled I/O performed by
rtems. Define to 2 if you want polled I/O performed by eppcbug. The printk()
port is not configured to use termios. With eppcbug 1.1, if mode 2 is
selected, printk_minor must be set to smc1_minor. This is a deficiency of the
firmware: it does not perform serial I/O on any port other than its default
debug port, which must be smc1. Printk always uses polled output.
"""
class PRINTK_MINOR(String):
value = "NOT_DEFINED_IN_BSP"
tag = ["general"]
undef = True
descr = """
Port to use for the rtems console: 0 - /Dev/Tty0, serial port 1/Console on the
mvme712m, 1 - /Dev/Tty1, serial port 2/Tty01 on the mvme712m, 2 - /Dev/Tty2,
serial port 3 on the mvme712m, 3 - /Dev/Tty3, serial port 4 on the mvme712m.])
"""
class PRINTK_MINOR_DUPLICATE(String):
value = "SMC2_MINOR"
tag = ["general"]
undef = True
descr = """
(Bsp--console driver) must be defined to be one of smc1_minor, smc2_minor,
scc2_minor, scc3_minor, or scc4_minor. Determines which device is used for
output by printk(). If the port that printk() uses is also used for other I/O
(e.G. If printk_minor == $console_minor), then both ports should use the
same type of I/O, otherwise the drivers will likely conflict with each other.
"""
class QORIQ_CLOCK_TIMER(Integer):
value = 0
tag = ["general"]
undef = True
descr = """
Global timer used for system clock, 0..3 maps to a0..a3, and 4..7 maps to
b0..b3
"""
class QORIQ_ETSEC_1_PHY_ADDR(Integer):
value = -1
tag = ["general"]
undef = True
descr = "Phy address for etsec interface 1"
class QORIQ_ETSEC_2_PHY_ADDR(Integer):
value = 0
tag = ["general"]
undef = True
descr = "Phy address for etsec interface 2"
class QORIQ_ETSEC_3_PHY_ADDR(Integer):
value = 1
tag = ["general"]
undef = True
descr = "Phy address for etsec interface 3"
class QORIQ_INITIAL_MSR(String):
value = "0x02000200"
tag = ["general"]
undef = True
descr = "Initial msr value"
class QORIQ_INITIAL_SPEFSCR(String):
value = "0x00000000"
tag = ["general"]
undef = True
descr = "Initial spefscr value"
class QORIQ_INTERCOM_AREA_BEGIN(String):
value = "0x3000000"
tag = ["build"]
undef = True
descr = "Inter-processor communication area begin"
class QORIQ_INTERCOM_AREA_SIZE(String):
value = "0x1000000"
tag = ["build"]
undef = True
descr = "Inter-processor communication area size"
class QORIQ_UART_0_ENABLE(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Use 1 to enable uart 0, otherwise use 0"
class QORIQ_UART_1_ENABLE(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Use 1 to enable uart 1, otherwise use 0"
class QORIQ_UART_BRIDGE_0_ENABLE(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Use 1 to enable uart 0 to intercom bridge, otherwise use 0"
class QORIQ_UART_BRIDGE_1_ENABLE(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Use 1 to enable uart 1 to intercom bridge, otherwise use 0"
class QORIQ_UART_BRIDGE_MASTER_CORE(Integer):
value = 0
tag = ["network"]
undef = True
descr = "Uart to intercom bridge master core index"
class QORIQ_UART_BRIDGE_SLAVE_CORE(Integer):
value = 1
tag = ["network"]
undef = True
descr = "Uart to intercom bridge slave core index"
class QORIQ_UART_BRIDGE_TASK_PRIORITY(Integer):
value = 250
tag = ["network"]
undef = True
descr = "Uart to intercom bridge task priority"
class RTEMS_BSP_I2C_EEPROM_DEVICE_NAME(String):
value = "eeprom"
tag = ["storage"]
undef = True
descr = "Eeprom name for libi2c"
class RTEMS_BSP_I2C_EEPROM_DEVICE_PATH(String):
value = "/dev/i2c1.eeprom"
tag = ["storage"]
undef = True
descr = "Eeprom device file path"
class RTEMS_XPARAMETERS_H(String):
value = "<xparameters_dflt.h>"
tag = ["general"]
undef = True
descr = """
This defines the location of the hardware specific xparameters.H
"""
class RTEMS_XPPC_BASE(String):
value = "."
tag = ["build"]
undef = True
descr = "Defines path to xilinx xps ppc libraries."
class SCORE603E_OPEN_FIRMWARE(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Use open firmware rom monitor"
class SCORE603E_USE_DINK(Boolean):
value = False
tag = ["build"]
undef = True
descr = "???"
class SCORE603E_USE_NONE(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Use no rom monitor"
class SCORE603E_USE_SDS(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Use sds rom monitor"
class SCRATCH_AREA_SIZE(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Size of scratch area"
class SIMSPARC_FAST_IDLE(Boolean):
value = False
tag = ["build"]
undef = True
descr = """
If defined, speed up the clock ticks while the idle task is running so time
spent in the idle task is minimized. This significantly reduces the wall time
required to execute the rtems test suites.
"""
class SINGLE_CHAR_MODE(String):
value = ""
tag = ["general"]
undef = True
descr = "Enable single character mode for the psc console driver"
class SMC91111_ENADDR_IS_SETUP(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined the smc91111 chip has the ethernet address loaded at reset.
"""
class SPECIAL_TASK_STACKS_SUPPORT(Boolean):
value = False
tag = ["build"]
undef = True
descr = """
Enable special task stack support for task stacks in internal ram.
"""
class SPI_BOARD_INIT_FNC(String):
value = "bsp_dummy_spi_init"
tag = ["build"]
undef = True
descr = """
(Bsp--spi board init function) specify the function that inits the board port
lines and further devices.
"""
class SPI_SEND_ADDR_FNC(String):
value = "bsp_dummy_spi_sel_addr"
tag = ["build"]
undef = True
descr = """
(Bsp--spi send address function) specify the function that addresses spi
devices. Set to bsp_dummy_spi_sel_addr for dummy implementation
"""
class SPI_SEND_STOP_FNC(String):
value = "bsp_dummy_spi_send_stop"
tag = ["build"]
undef = True
descr = """
(Bsp--spi send stop function) specify the function that deaddresses spi
devices. Set to bsp_dummy_spi_send_stop for dummy implementation
"""
class STANDALONE_EVB(String):
value = ""
tag = ["general"]
undef = True
descr = """
If defined, compiles code to jump-start from flash, without a monitor
"""
class START_HW_INIT(String):
value = ""
tag = ["build"]
undef = True
descr = """
If defined, selects whether 'early_hw_init()' is called from 'start.S';
'bsp_hw_init()' is always called from 'bspstart.C'
"""
class STOP_ETHERNET(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Stop ethernet controller at start-up to avoid dma interference"
class STOP_GPDMA(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Stop general purpose dma at start-up to avoid dma interference"
class STOP_USB(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Stop usb controller at start-up to avoid dma interference"
class TESTS_USE_PRINTK(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Tests use printk() for output"
class UART_1_BAUD(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Baud for uart 1"
class UART_2_BAUD(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Baud for uart 2"
class UART_7_BAUD(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Baud for uart 7"
class UART_BAUD(Boolean):
value = False
tag = ["network"]
undef = True
descr = "Baud for uarts"
class UART_USE_DMA(Boolean):
value = True
tag = ["network"]
undef = True
descr = """
The uart driver can operate in dma mode with interrupts. Set true if dma
operation is required
"""
class UARTS_IO_MODE(Integer):
value = 0
tag = ["network"]
undef = True
descr = """
Define to 0 if you want polled I/O performed by rtems. Define to 1 if
you want interrupt-driven I/O performed by rtems. Define to 2 if you want polled
I/O performed by eppcbug. There is no provision to have a MIX of interrupt-
driven and polled I/O ports, except that the printk port may use a different
mode from the other ports. If this is done, do not open the printk port from
an rtems application. With eppcbug 1.1, if mode 2 is selected, console_minor
must be set to smc1_minor. This is a deficiency of the firmware: it does not
perform serial I/O on any port other than its default debug port, which must
be smc1.
"""
class UARTS_USE_TERMIOS(Boolean):
value = False
tag = ["network"]
undef = True
descr = """
Define to 1 if you want termios support for every port. Termios support is
independent of the choice of uart I/O mode.
"""
class UARTS_USE_TERMIOS_INT(Boolean):
value = True
tag = ["network"]
undef = True
descr = "Enable interrupt support for the psc console driver"
class USE_COM1_AS_CONSOLE(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
Determines, whether the console will be associated with the standard vga
display or with the com1 serial port. Currently only the vga display and com1
support printk.
"""
class WATCHDOG_TIMEOUT(String):
value = "0xFFFF"
tag = ["general"]
undef = True
descr = """
Define to the desired timeout (in steps of 1/20 msec) to enable the watchdog.
Default is to disable the watchdog entirely.
"""
# These are all hacks, they only exist to enable shared BSPS, they are not
# required and will be removed in the future.
class BOARD_PHYCORE_MPC5554(Boolean):
value = False
tag = ["general"]
undef = True
descr = """
If defined, use custom settings for the phytec phycore mpc5554 som
"""
class BSP_TYPE_DP2(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable settings for dp2"
class csb637(Boolean):
value = False
tag = ["build"]
undef = True
descr = """
If defined, this indicates that the bsp is being built for the csb637
variant.
"""
class GEN68360(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the gen68360 bsp."
class GEN68360_040(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the gen68360_040 bsp."
class HSC_CM01(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the hsc_cm01 bsp."
class M5484FIREENGINE(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the m5484fireengine bsp."
class mpc8240(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Defined for boards with mpc8240 -- undefined for others"
class MPC8313ERDB(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the mpc8313erdb bsp."
class MPC8349(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the mpc8349 libcpu family."
class MPC8349EAMDS(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the mpc8349eamds bsp."
class mvme167(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Defined for mvme167 -- undefined for others"
class mvme2100(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Defined for mvme2100 -- undefined for others"
class PGH360(Boolean):
value = False
tag = ["build"]
undef = True
descr = "If defined, use custom settings for the pgh360 bsp."
class qemu(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Defined for qemu bsp -- undefined for others"
class MPC5200_BOARD_BRS5L(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 BRS5L"
class MPC5200_BOARD_BRS6L(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 BRS6l"
class MPC5200_BOARD_DP2(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 dp2"
class MPC5200_BOARD_ICECUBE(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 icecube"
class MPC5200_BOARD_PM520_CR825(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 PM520_CR825"
class MPC5200_BOARD_PM520_ZE30(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Enable settings for powerpc MPC5200 pm520"
# RTEMS internal options.
class USE_CLANG(Boolean):
value = False
tag = ["build"]
undef = True
descr = "Use Clang compiler."
class USE_GCC(Boolean):
value = True
tag = ["build"]
undef = True
descr = "Use GCC compiler.."
# THESE ARE UNSORTED!
class LPC24XX_PCLKDIV(String):
value = "1U"
tag = ["general"]
undef = True
descr = "clock divider for default PCLK (PCLK = CCLK / PCLKDIV)"
class LPC24XX_EMCCLKDIV(String):
value = "2U"
tag = ["general"]
undef = True
descr = "clock divider for EMCCLK (EMCCLK = CCLK / EMCCLKDIV)"
class LPC24XX_EMC_MT48LC4M16A2(Boolean):
value = False
tag = ["general"]
undef = True
descr = "enable Micron MT48LC4M16A2 configuration for EMC"
class LPC24XX_EMC_W9825G2JB75I(Boolean):
value = True
tag = ["general"]
undef = True
descr = "enable Winbond W9825G2JB75I configuration for EMC"
class LPC24XX_EMC_IS42S32800D7(Boolean):
value = True
tag = ["general"]
undef = True
descr = "enable ISSI IS42S32800D7 configuration for EMC"
class LPC24XX_EMC_IS42S32800B(Boolean):
value = True
tag = ["general"]
undef = True
descr = "enable ISSI IS42S32800B configuration for EMC"
class LPC24XX_EMC_M29W160E(Boolean):
value = True
tag = ["general"]
undef = True
descr = "enable M29W160E configuration for EMC"
class LPC24XX_EMC_M29W320E70(Boolean):
value = False
tag = ["general"]
undef = True
descr = "enable M29W320E70 configuration for EMC"
class LPC24XX_EMC_SST39VF3201(Boolean):
value = True
tag = ["general"]
undef = True
descr = "enable SST39VF3201 configuration for EMC"
class LPC_DMA_CHANNEL_COUNT(Integer):
value = 2
tag = ["general"]
undef = True
descr = "DMA channel count"
class BSP_USB_OTG_TRANSCEIVER_I2C_ADDR(String):
value = ""
tag = ["general"]
undef = True
descr = "USB OTG transceiver I2C address used by USB stack"
class MPC55XX_CHIP_FAMILY(String):
value = "(MPC55XX_CHIP_TYPE / 10)"
tag = ["general"]
undef = True
descr = "specifies the chip family in use (e.g. 555 for MPC5554)"
class SMSC9218I_EDMA_RX_CHANNEL(Integer):
value = 49
tag = ["network"]
undef = True
descr = "receive eDMA channel for SMSC9218I network interface"
class SMSC9218I_EDMA_TX_CHANNEL(Integer):
value = 48
tag = ["network"]
undef = True
descr = "transmit eDMA channel for SMSC9218I network interface"
class SMSC9218I_BIG_ENDIAN_SUPPORT(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable big endian support for SMSC9218I network interface"
class SMSC9218I_ENABLE_LED_OUTPUTS(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable LED outputs for SMSC9218I network interface"
class SMSC9218I_RESET_PIN(Integer):
value = 186
tag = ["network"]
undef = True
descr = "reset pin for SMSC9218I network interface"
class SMSC9218I_IRQ_PIN(Integer):
value = 193
tag = ["network"]
undef = True
descr = "IRQ pin for SMSC9218I network interface"
class MPC55XX_SYSTEM_CLOCK_DIVIDER(Integer):
value = 1
tag = ["general"]
undef = True
descr = "system clock divider"
class MPC55XX_REFERENCE_CLOCK(Integer):
value = 8000000
tag = ["general"]
undef = True
descr = "Must be defined to be the external reference clock (in Hz) for clock generation"
class MPC55XX_SYSTEM_CLOCK(Integer):
value = 8000000
tag = ["general"]
undef = True
descr = "The system clock frequency in Hz."
class MPC55XX_FMPLL_ESYNCR1_CLKCFG(Integer):
value = 7
tag = ["general"]
undef = True
descr = "the FMPLL ESYNCR1[CLKCFG] value"
class MPC83XX_BOARD_HSC_CM01(Boolean):
value = True
tag = ["build"]
undef = True
descr = "if defined, then use settings for the HSC_CM01 board"
class LM3S69XX_ENABLE_UART_0(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable UART 0"
class LM3S69XX_ENABLE_UART_1(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable UART 1"
class LM3S69XX_ENABLE_UART_2(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable UART 2"
class LM3S69XX_HAS_UDMA(Boolean):
value = False
tag = ["general"]
undef = True
descr = "defined if MCU supports UDMA"
class LM3S69XX_MCU_LM3S3749(Boolean):
value = False
tag = ["build"]
undef = True
descr = "board has LM3S3749 MCU"
class LM3S69XX_MCU_LM3S6965(Boolean):
value = False
tag = ["build"]
undef = True
descr = "board has LM3S6965 MCU"
class LM3S69XX_NUM_GPIO_BLOCKS(Integer):
value = 1
tag = ["build"]
undef = True
descr = "number of GPIO blocks supported by MCU"
class LM3S69XX_NUM_SSI_BLOCKS(Integer):
value = 1
tag = ["build"]
undef = True
descr = "number of SSI blocks supported by MCU"
class LM3S69XX_SSI_CLOCK(String):
value = "1000000U"
tag = ["general"]
undef = True
descr = "SSI clock in Hz"
class LM3S69XX_SYSTEM_CLOCK(String):
value = "50000000U"
tag = ["general"]
undef = True
descr = "system clock in Hz"
class LM3S69XX_UART_BAUD(String):
value = "115200U"
tag = ["general"]
undef = True
descr = "baud for UARTs"
class LM3S69XX_USE_AHB_FOR_GPIO(Boolean):
value = False
tag = ["general"]
undef = True
descr = "use AHB apperture to access GPIO registers"
class LM3S69XX_XTAL_CONFIG(String):
value = "0x10"
tag = ["build"]
undef = True
descr = "crystal configuration for RCC register"
class BSP_ARM_A9MPCORE_PERIPHCLK(String):
value = "100000000U"
tag = ["build"]
undef = True
descr = "ARM Cortex-A9 MPCore PERIPHCLK clock frequency in Hz"
class STM32F4_HSE_OSCILLATOR(Integer):
value = 8000000
tag = ["build"]
undef = True
descr = "HSE oscillator frequency in Hz"
class STM32F4_SYSCLK(Integer):
value = 16000000
tag = ["general"]
undef = True
descr = "SYSCLK frequency in Hz"
class STM32F4_HCLK(Integer):
value = 16000000
tag = ["general"]
undef = True
descr = "HCLK frequency in Hz"
class STM32F4_PCLK1(Integer):
value = 16000000
tag = ["general"]
undef = True
descr = "PCLK1 frequency in Hz"
class STM32F4_PCLK2(Integer):
value = 16000000
tag = ["general"]
undef = True
descr = "PCLK2 frequency in Hz"
class STM32F4_USART_BAUD(Integer):
value = 115200
tag = ["network"]
undef = True
descr = "baud for USARTs"
class STM32F4_ENABLE_USART_1(Boolean):
value = False
tag = ["network"]
undef = True
descr = "enable USART 1"
class STM32F4_ENABLE_USART_2(Boolean):
value = False
tag = ["network"]
undef = True
descr = "enable USART 2"
class STM32F4_ENABLE_USART_3(Boolean):
value = True
tag = ["network"]
undef = True
descr = "enable USART 3"
class STM32F4_ENABLE_UART_4(Boolean):
value = False
tag = ["network"]
undef = True
descr = "enable UART 4"
class STM32F4_ENABLE_UART_5(Boolean):
value = False
tag = ["network"]
undef = True
descr = "enable UART 5"
class STM32F4_ENABLE_USART_6(Boolean):
value = False
tag = ["network"]
undef = True
descr = "enable USART 6"
class MPC83XX_BOARD_BR_UID(Boolean):
value = True
tag = ["general"]
undef = True
descr = "if defined, then use settings for the BR UID board"
class MPC83XX_NETWORK_INTERFACE_0_PHY_ADDR(String):
value = "0x11"
tag = ["build"]
undef = True
quote = False
descr = "PHY address of network interface 0"
class MPC83XX_CHIP_TYPE(Integer):
value = 0
tag = ["build"]
undef = True
descr = "chip type of the MPC83XX family"
class MPC83XX_HAS_NAND_LP_FLASH_ON_CS0(Boolean):
value = True
tag = ["build"]
undef = True
descr = "indicates if the board has a NAND large page flash on chip select 0"
class BSP_INTERRUPT_HANDLER_TABLE_SIZE(Integer):
no_default = True
undef = True
descr = "defines the maximum number of interrupt handlers"
tag = ["general"]
class MPC55XX_NULL_POINTER_PROTECTION(Boolean):
value = True
tag = ["build"]
undef = True
descr = "enable NULL pointer protection"
class MPC55XX_CLOCK_PIT_CHANNEL(Integer):
no_default = True
undef = True
descr = "selects the PIT channel for the RTEMS system tick (the default is the last channel"
tag = ["build"]
class MPC55XX_NEEDS_LOW_LEVEL_INIT(Boolean):
value = True
tag = ["build"]
undef = True
descr = "if defined, do low level initialization"
class BSP_DATA_CACHE_USE_WRITE_THROUGH(Boolean):
no_default = True
undef = True
descr = "use write-through for data cache"
tag = ["storage"]
class MPC55XX_BOARD_MPC5674F_ECU508(Boolean):
value = True
tag = ["build"]
undef = True
descr = "if defined, use custom settings for ECU508 board"
class MPC55XX_CONSOLE_MINOR(Integer):
value = 0
tag = ["build"]
undef = True
descr = "determines which serial device will be registered as /dev/console"
class MPC55XX_BOARD_MPC5674F_RSM6(Boolean):
value = True
tag = ["build"]
quote = False
undef = True
descr = "if defined, use custom settings for RSM6 board"
class MPC55XX_ENABLE_START_PROLOGUE(Boolean):
value = True
tag = ["build"]
undef = True
descr = "if defined, enable start prologue"
class BSP_DEFAULT_BAUD_RATE(Integer):
value = 115200
tag = ["general"]
undef = True
descr = "default console baud"
class MPC55XX_EARLY_STACK_SIZE(Integer):
value = 1024
tag = ["build"]
undef = True
descr = "size of the early initialization stack in bytes"
class MPC83XX_BOARD_MPC8309SOM(Boolean):
value = True
tag = ["build"]
undef = True
descr = "if defined, then use settings for the MPC8309SOM board"
class ZYNQ_RAM_ORIGIN(String):
value = "0x00400000"
tag = ["storage"]
undef = True
descr = "Normal RAM region origin"
class ZYNQ_RAM_MMU(String):
value = "%(ZYNQ_RAM_ORIGIN)s"
tag = ["storage"]
quote = False
undef = True
descr = "MMU region origin"
class ZYNQ_RAM_MMU_LENGTH(String):
value = "16k"
tag = ["storage"]
undef = True
descr = "MMU region length"
class ZYNQ_RAM_ORIGIN_AVAILABLE(String):
value = "%(ZYNQ_RAM_ORIGIN)s + 0x00004000"
tag = ["storage"]
undef = True
descr = "Origin of available RAM"
class ZYNQ_RAM_LENGTH_AVAILABLE(String):
value = "%(BSP_ZYNQ_RAM_LENGTH)s - 1M - 16k"
tag = ["storage"]
undef = True
descr = "Length of available RAM"
class ZYNQ_RAM_INT_0_ORIGIN(String):
value = "0x00000000"
tag = ["storage"]
undef = True
descr = "Internal 0 RAM region origin"
class ZYNQ_RAM_INT_0_LENGTH(String):
value = "64k + 64k + 64k"
tag = ["storage"]
undef = True
descr = "Internal 0 RAM region length"
class ZYNQ_RAM_INT_1_ORIGIN(String):
value = "0xFFFF0000"
tag = ["storage"]
undef = True
descr = "Internal 1 RAM region origin"
class ZYNQ_RAM_INT_1_LENGTH(String):
value = "64k - 512"
tag = ["storage"]
undef = True
descr = "Internal 1 RAM region length"
class BSP_ZYNQ_RAM_LENGTH(String):
value = "256M"
tag = ["storage"]
quote = False
undef = True
descr = "Override a BSP's default RAM length"
class ZYNQ_RAM_NOCACHE_LENGTH(String):
value = "1M"
tag = ["storage"]
quote = False
undef = True
descr = "Length of nocache RAM region"
class ZYNQ_CLOCK_CPU_1X(String):
value = "111111111U"
tag = ["general"]
quote = False
undef = True
descr = "Zynq cpu_1x clock frequency in Hz"
class ZYNQ_CLOCK_UART(String):
value = "50000000UL"
tag = ["network"]
quote = False
undef = True
descr = "Zynq UART clock frequency in Hz"
class ZYNQ_CPUS(Integer):
value = 1
tag = ["general"]
quote = False
undef = True
descr = "Number of active cores"
class IS_DM3730(Boolean):
value = False
tag = ["build"]
undef = True
descr = "true if SOC is DM3730"
class IS_AM335X(Boolean):
value = False
tag = ["build"]
undef = True
descr = "true if SOC is AM335X"
class CONSOLE_POLLED(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Polled console i/o."
class CONSOLE_BAUD(Integer):
value = 115200
tag = ["network"]
undef = True
descr = "initial baud for console UART"
class ENABLE_SYSTEM_DEP(Boolean):
value = False
tag = ["general"]
undef = True
descr = "Enable dependencies on system headers, only useful if you are developing toolchains. This will slow down the build"
|
anandkp92/waf
|
py/waf/defaults/options.py
|
Python
|
gpl-2.0
| 57,446
|
[
"CRYSTAL"
] |
c26610bdb75285d87cfbd69c4aa5ab8cf78888d8205a2431daf1a0d45727dc7a
|
# -*- coding: utf-8 -*-
"""
====================
Zen
====================
* Tools for reading and writing files for Zen and processing software
* Tools for copying data from SD cards
* Tools for copying schedules to SD cards
Created on Tue Jun 11 10:53:23 2013
@author: jpeacock-pr
"""
#==============================================================================
#from __future__ import unicode_literals
import time
import datetime
import dateutil.parser
import os
import struct
import string
import shutil
import numpy as np
import mtpy.imaging.plotspectrogram as plotspectrogram
import mtpy.core.ts as mtts
try:
import win32api
except ImportError:
print("WARNING: Cannot find win32api, will not be able to detect"+
" drive names")
#==============================================================================
datetime_fmt = '%Y-%m-%d,%H:%M:%S'
datetime_sec = '%Y-%m-%d %H:%M:%S.%f'
#==============================================================================
# =============================================================================
# Get leap seconds
# =============================================================================
def calculate_leap_seconds(year, month, day):
"""
get the leap seconds for the given year to convert GPS time to UTC time
.. note:: GPS time started in 1980
.. note:: GPS time is leap seconds ahead of UTC time, therefore you
should subtract leap seconds from GPS time to get UTC time.
=========================== ===============================================
Date Range Leap Seconds
=========================== ===============================================
1981-07-01 - 1982-07-01 1
1982-07-01 - 1983-07-01 2
1983-07-01 - 1985-07-01 3
1985-07-01 - 1988-01-01 4
1988-01-01 - 1990-01-01 5
1990-01-01 - 1991-01-01 6
1991-01-01 - 1992-07-01 7
1992-07-01 - 1993-07-01 8
1993-07-01 - 1994-07-01 9
1994-07-01 - 1996-01-01 10
1996-01-01 - 1997-07-01 11
1997-07-01 - 1999-01-01 12
1999-01-01 - 2006-01-01 13
2006-01-01 - 2009-01-01 14
2009-01-01 - 2012-07-01 15
2012-07-01 - 2015-07-01 16
2015-07-01 - 2017-01-01 17
2017-01-01 - ????-??-?? 18
=========================== ===============================================
"""
leap_second_dict = {0: {'min':datetime.date(1980, 1, 1),
'max':datetime.date(1981, 7, 1)},
1: {'min':datetime.date(1981, 7, 1),
'max':datetime.date(1982, 7, 1)},
2: {'min':datetime.date(1982, 7, 1),
'max':datetime.date(1983, 7, 1)},
3: {'min':datetime.date(1983, 7, 1),
'max':datetime.date(1985, 7, 1)},
4: {'min':datetime.date(1985, 7, 1),
'max':datetime.date(1988, 1, 1)},
5: {'min':datetime.date(1988, 1, 1),
'max':datetime.date(1990, 1, 1)},
6: {'min':datetime.date(1990, 1, 1),
'max':datetime.date(1991, 1, 1)},
7: {'min':datetime.date(1991, 1, 1),
'max':datetime.date(1992, 7, 1)},
8: {'min':datetime.date(1992, 7, 1),
'max':datetime.date(1993, 7, 1)},
9: {'min':datetime.date(1993, 7, 1),
'max':datetime.date(1994, 7, 1)},
10: {'min':datetime.date(1994, 7, 1),
'max':datetime.date(1996, 1, 1)},
11: {'min':datetime.date(1996, 1, 1),
'max':datetime.date(1997, 7, 1)},
12: {'min':datetime.date(1997, 7, 1),
'max':datetime.date(1999, 1, 1)},
13: {'min':datetime.date(1999, 1, 1),
'max':datetime.date(2006, 1, 1)},
14: {'min':datetime.date(2006, 1, 1),
'max':datetime.date(2009, 1, 1)},
15: {'min':datetime.date(2009, 1, 1),
'max':datetime.date(2012, 6, 30)},
16: {'min':datetime.date(2012, 6, 30),
'max':datetime.date(2015, 6, 30)},
17: {'min':datetime.date(2015, 6, 30),
'max':datetime.date(2016, 12, 31)},
18: {'min':datetime.date(2016, 12, 31),
'max':datetime.date(2020, 12, 1)}}
year = int(year)
month = int(month)
day = int(day)
# make the date a datetime object, easier to test
given_date = datetime.date(year, month, day)
# made an executive decision that the date can be equal to the min, but
# not the max, otherwise an error is raised.
for leap_key in sorted(leap_second_dict.keys()):
if given_date < leap_second_dict[leap_key]['max'] and\
given_date >= leap_second_dict[leap_key]['min']:
return int(leap_key)
return None
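# Hedged usage sketch (added for illustration, not part of the original module):
# the leap-second count returned above can be subtracted from a GPS timestamp
# to obtain UTC, since GPS time runs ahead of UTC.
def _example_gps_to_utc(year=2015, month=5, day=22):
    """Illustrative helper with assumed example values: convert a GPS datetime
    to UTC by subtracting the leap seconds valid on that date."""
    leap = calculate_leap_seconds(year, month, day)
    gps_dt = datetime.datetime(year, month, day, 8, 0, 18)
    # GPS time is ahead of UTC by the leap-second count, so subtract it
    return gps_dt - datetime.timedelta(seconds=leap)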
#==============================================================================
class Z3DHeader(object):
"""
class for the Z3D header. This will read in the header information of a
Z3D file and make each metadata entry an attribute
Arguments
------------
**fn** : string
full path to Z3D file
**fid** : file object
ie. open(Z3Dfile, 'rb')
======================== ==================================================
Attributes Definition
======================== ==================================================
_header_len length of header in bytes (512)
ad_gain gain of channel
ad_rate sampling rate in Hz
alt altitude of the station (not reliable)
attenchannelsmask not sure
box_number ZEN box number
box_serial ZEN box serial number
channel channel number of the file
channelserial serial number of the channel board
duty duty cycle of the transmitter
fpga_buildnum build number of one of the boards
gpsweek GPS week
header_str full header string
lat latitude of station
logterminal not sure
long longitude of the station
main_hex_buildnum build number of the ZEN box in hexadecimal
numsats number of gps satellites
period period of the transmitter
tx_duty transmitter duty cycle
tx_freq transmitter frequency
version version of the firmware
======================== ==================================================
======================== ==================================================
Methods Description
======================== ==================================================
convert_value convert the read in header metadata to
appropriate units and data types.
read_header read in the header data from the given file
======================== ==================================================
Example
--------------
>>> import mtpy.usgs.zen as zen
>>> Z3Dfn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> header_obj = zen.Z3DHeader()
>>> header_obj.read_header()
"""
def __init__(self, fn=None, fid=None, **kwargs):
self.fn = fn
self.fid = fid
self.header_str = None
self._header_len = 512
self.ad_gain = None
self.ad_rate = None
self.alt = None
self.attenchannelsmask = None
self.box_number = None
self.box_serial = None
self.channel = None
self.channelserial = None
self.duty = None
self.fpga_buildnum = None
self.gpsweek = 1740
self.lat = None
self.logterminal = None
self.long = None
self.main_hex_buildnum = None
self.numsats = None
self.period = None
self.tx_duty = None
self.tx_freq = None
self.version = None
self.old_version = False
for key in kwargs:
setattr(self, key, kwargs[key])
@property
def data_logger(self):
return 'ZEN{0:03}'.format(int(self.box_number))
def read_header(self, fn=None, fid=None):
"""
read in the header string
"""
if fn is not None:
self.fn = fn
if fid is not None:
self.fid = fid
if self.fn is None and self.fid is None:
print('WARNING: No file to read')
elif self.fn is None:
if self.fid is not None:
self.fid.seek(0)
self.header_str = self.fid.read(self._header_len)
elif self.fn is not None:
if self.fid is None:
self.fid = open(self.fn, 'rb')
self.header_str = self.fid.read(self._header_len)
else:
self.fid.seek(0)
self.header_str = self.fid.read(self._header_len)
header_list = self.header_str.split(b'\n')
for h_str in header_list:
h_str = h_str.decode()
if h_str.find('=') > 0:
h_list = h_str.split('=')
h_key = h_list[0].strip().lower()
h_key = h_key.replace(' ', '_').replace('/', '').replace('.', '_')
h_value = self.convert_value(h_key, h_list[1].strip())
setattr(self, h_key, h_value)
elif len(h_str) == 0:
continue
# need to adjust for older versions of z3d files
elif h_str.count(',') > 1:
self.old_version = True
if h_str.find('Schedule') >= 0:
h_str = h_str.replace(',', 'T', 1)
for hh in h_str.split(','):
if hh.find(';') > 0:
m_key, m_value = hh.split(';')[1].split(':')
elif len(hh.split(':', 1)) == 2:
m_key, m_value = hh.split(':', 1)
else:
print(hh)
continue
m_key = m_key.strip().lower().replace(' ', '_').replace('/', '').replace('.', '_')
m_value = self.convert_value(m_key, m_value.strip())
setattr(self, m_key, m_value)
def convert_value(self, key_string, value_string):
"""
convert the value to the appropriate units given the key
"""
try:
return_value = float(value_string)
except ValueError:
return_value = value_string
if key_string.lower() in ['lat', 'lon', 'long']:
return_value = np.rad2deg(float(value_string))
if 'lat' in key_string.lower():
if abs(return_value) > 90:
return_value = 0.0
elif 'lon' in key_string.lower():
if abs(return_value) > 180:
return_value = 0.0
return return_value
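# Hedged illustration (assumed example values, not from the original file):
# convert_value treats latitude/longitude header strings as radians and
# returns decimal degrees, e.g.
# >>> Z3DHeader().convert_value('lat', '0.7097')
# 40.66...   # approximately, via np.rad2deg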
#==============================================================================
# meta data
#==============================================================================
class Z3DSchedule(object):
"""
class object for the schedule metadata of a Z3D file. This will read in the
schedule information of a Z3D file and make each metadata entry an attribute.
The attributes are left in capitalization of the Z3D file.
Arguments
------------
**fn** : string
full path to Z3D file
**fid** : file object
ie. open(Z3Dfile, 'rb')
======================== ==================================================
Attributes Definition
======================== ==================================================
AutoGain Auto gain for the channel
Comment Any comments for the schedule
Date Date of when the schedule action was started
YYYY-MM-DD
Duty Duty cycle of the transmitter
FFTStacks FFT stacks from the transmitter
Filename Name of the file that the ZEN gives it
Gain Gain of the channel
Log Log the data [ Y | N ]
NewFile Create a new file [ Y | N ]
Period Period of the transmitter
RadioOn Turn on the radio [ Y | N ]
SR Sampling Rate in Hz
SamplesPerAcq Samples per acquisition for transmitter
Sleep Set the box to sleep [ Y | N ]
Sync Sync with GPS [ Y | N ]
Time Time the schedule action started
HH:MM:SS (GPS time)
_header_len length of header in bytes (512)
_schedule_metadata_len length of schedule metadata in bytes (512)
fid file object of the file
fn file name to read in
meta_string string of the schedule
======================== ==================================================
======================== ==================================================
Methods Description
======================== ==================================================
read_schedule read in the schedule information from the given
file
======================== ==================================================
Example
--------------
>>> import mtpy.usgs.zen as zen
>>> Z3Dfn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> header_obj = zen.Z3DSchedule()
>>> header_obj.read_schedule()
"""
def __init__(self, fn=None, fid=None, **kwargs):
self.fn = fn
self.fid = fid
self.meta_string = None
self._schedule_metadata_len = 512
self._header_len = 512
self.AutoGain = None
self.Comment = None
self.Date = None
self.Duty = None
self.FFTStacks = None
self.Filename = None
self.Gain = None
self.Log = None
self.NewFile = None
self.Period = None
self.RadioOn = None
self.SR = None
self.SamplesPerAcq = None
self.Sleep = None
self.Sync = None
self.Time = None
self.datetime = None
for key in kwargs:
setattr(self, key, kwargs[key])
def read_schedule(self, fn=None, fid=None):
"""
read meta data string
"""
if fn is not None:
self.fn = fn
if fid is not None:
self.fid = fid
if self.fn is None and self.fid is None:
print('WARNING: No file to read')
elif self.fn is None:
if self.fid is not None:
self.fid.seek(self._header_len)
self.meta_string = self.fid.read(self._header_len)
elif self.fn is not None:
if self.fid is None:
self.fid = open(self.fn, 'rb')
self.fid.seek(self._header_len)
self.meta_string = self.fid.read(self._header_len)
else:
self.fid.seek(self._header_len)
self.meta_string = self.fid.read(self._header_len)
meta_list = self.meta_string.split(b'\n')
for m_str in meta_list:
m_str = m_str.decode()
if m_str.find('=') > 0:
m_list = m_str.split('=')
m_key = m_list[0].split('.')[1].strip()
m_key = m_key.replace('/', '')
m_value = m_list[1].strip()
setattr(self, m_key, m_value)
# the first good GPS stamp is on the 3rd, so need to add 2 seconds
try:
self.Time = '{0}{1:02}'.format(self.Time[0:6],
int(self.Time[6:])+2)
except TypeError:
return
self.datetime = datetime.datetime.strptime('{0},{1}'.format(self.Date,
self.Time),
datetime_fmt)
#==============================================================================
# Meta data class
#==============================================================================
class Z3DMetadata(object):
"""
class object for metadata of a Z3D file. This will read in the metadata
information of a Z3D file and make each metadata entry an attribute.
The attributes are left in capitalization of the Z3D file.
Arguments
------------
**fn** : string
full path to Z3D file
**fid** : file object
ie. open(Z3Dfile, 'rb')
======================== ==================================================
Attributes Definition
======================== ==================================================
_header_length length of header in bytes (512)
_metadata_length length of metadata blocks (512)
_schedule_metadata_len length of schedule meta data (512)
board_cal board calibration np.ndarray()
cal_ant antenna calibration
cal_board board calibration
cal_ver calibration version
ch_azimuth channel azimuth
ch_cmp channel component
ch_length channel length (or # of coil)
ch_number channel number on the ZEN board
ch_xyz1 channel xyz location (not sure)
ch_xyz2 channel xyz location (not sure)
coil_cal coil calibration np.ndarray (freq, amp, phase)
fid file object
find_metadata boolean of finding metadata
fn full path to Z3D file
gdp_operator operator of the survey
gdp_progver program version
job_by job performed by
job_for job for
job_name job name
job_number job number
m_tell location in the file where the last metadata
block was found.
rx_aspace electrode spacing
rx_sspace not sure
rx_xazimuth x azimuth of electrode
rx_xyz0 not sure
rx_yazimuth y azimuth of electrode
survey_type type of survey
unit_length length units (m)
======================== ==================================================
======================== ==================================================
Methods Description
======================== ==================================================
read_metadata read in the metadata information from the given
file
======================== ==================================================
Example
--------------
>>> import mtpy.usgs.zen as zen
>>> Z3Dfn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> header_obj = zen.Z3DMetadata()
>>> header_obj.read_metadata()
"""
def __init__(self, fn=None, fid=None, **kwargs):
self.fn = fn
self.fid = fid
self.find_metadata = True
self.board_cal = None
self.coil_cal = None
self._metadata_length = 512
self._header_length = 512
self._schedule_metadata_len = 512
self.m_tell = 0
self.cal_ant = None
self.cal_board = None
self.cal_ver = None
self.ch_azimuth = None
self.ch_cmp = None
self.ch_length = None
self.ch_number = None
self.ch_xyz1 = None
self.ch_xyz2 = None
self.gdp_operator = None
self.gdp_progver = None
self.job_by = None
self.job_for = None
self.job_name = None
self.job_number = None
self.rx_aspace = None
self.rx_sspace = None
self.rx_xazimuth = None
self.rx_xyz0 = None
self.rx_yazimuth = None
self.line_name = None
self.survey_type = None
self.unit_length = None
self.station = None
self.count = 0
self.notes = None
for key in kwargs:
setattr(self, key, kwargs[key])
def read_metadata(self, fn=None, fid=None):
"""
read meta data
"""
if fn is not None:
self.fn = fn
if fid is not None:
self.fid = fid
if self.fn is None and self.fid is None:
print('WARNING: No file to read')
elif self.fn is None:
if self.fid is not None:
self.fid.seek(self._header_length+self._schedule_metadata_len)
elif self.fn is not None:
if self.fid is None:
self.fid = open(self.fn, 'rb')
self.fid.seek(self._header_length+self._schedule_metadata_len)
else:
self.fid.seek(self._header_length+self._schedule_metadata_len)
# read in calibration and meta data
self.find_metadata = True
self.board_cal = []
self.coil_cal = []
self.count = 0
m_list = []
cal_find = False
while self.find_metadata == True:
try:
test_str = self.fid.read(self._metadata_length).decode().lower()
except UnicodeDecodeError:
self.find_metadata = False
break
if 'metadata' in test_str:
self.count += 1
test_str = test_str.strip().split('record')[1].strip()
if test_str.count('|') > 1:
for t_str in test_str.split('|'):
# get metadata name and value
if t_str.find('=') == -1 and \
t_str.lower().find('line.name') == -1:
# get metadata for older versions of z3d files
if len(t_str.split(',')) == 2:
t_list = t_str.lower().split(',')
t_key = t_list[0].strip().replace('.', '_')
if t_key == 'ch_varasp':
t_key = 'ch_length'
t_value = t_list[1].strip()
setattr(self, t_key, t_value)
if t_str.count(' ') > 1:
self.notes = t_str
# get metadata for just the line that has line name
# because for some reason that is still comma separated
elif t_str.lower().find('line.name') >= 0:
t_list = t_str.split(',')
t_key = t_list[0].strip().replace('.', '_')
t_value = t_list[1].strip()
setattr(self, t_key.lower(), t_value)
# get metadata for newer z3d files
else:
t_list = t_str.split('=')
t_key = t_list[0].strip().replace('.', '_')
t_value = t_list[1].strip()
setattr(self, t_key.lower(), t_value)
elif 'cal.brd' in test_str:
t_list = test_str.split(',')
t_key = t_list[0].strip().replace('.', '_')
setattr(self, t_key.lower(), t_list[1])
for t_str in t_list[2:]:
t_str = t_str.replace('\x00', '').replace('|', '')
try:
self.board_cal.append([float(tt.strip())
for tt in t_str.strip().split(':')])
except ValueError:
self.board_cal.append([tt.strip()
for tt in t_str.strip().split(':')])
# sometimes the coil calibration does not start on its own line
# so need to parse the line up and I'm not sure what the calibration
# version is for so I have named it odd
elif 'cal.ant' in test_str:
# check to see if the coil calibration exists
cal_find = True
test_list = test_str.split(',')
coil_num = test_list[1].split('|')[1]
coil_key, coil_value = coil_num.split('=')
setattr(self, coil_key.replace('.', '_').lower(),
coil_value.strip())
for t_str in test_list[2:]:
if '\x00' in t_str:
break
self.coil_cal.append([float(tt.strip())
for tt in t_str.split(':')])
elif cal_find and self.count > 3:
t_list = test_str.split(',')
for t_str in t_list:
if '\x00' in t_str:
break
else:
self.coil_cal.append([float(tt.strip())
for tt in t_str.strip().split(':')])
else:
self.find_metadata = False
# need to go back to where the meta data was found so
# we don't skip a gps time stamp
self.m_tell = self.fid.tell() - self._metadata_length
# make coil calibration and board calibration structured arrays
if len(self.coil_cal) > 0:
self.coil_cal = np.core.records.fromrecords(self.coil_cal,
names='frequency, amplitude, phase')
if len(self.board_cal) > 0:
try:
self.board_cal = np.core.records.fromrecords(self.board_cal,
names='frequency, rate, amplitude, phase')
except ValueError:
self.board_cal = None
try:
self.station = '{0}{1}'.format(self.line_name,
self.rx_xyz0.split(':')[0])
except AttributeError:
if hasattr(self, 'rx_stn'):
self.station = f"{self.rx_stn}"
elif hasattr(self, 'ch_stn'):
self.station = f"{self.ch_stn}"
else:
self.station = None
print("WARNING: Need to input station name")
#==============================================================================
#
#==============================================================================
class Zen3D(object):
"""
Deals with the raw Z3D files output by zen.
Arguments
-----------
**fn** : string
full path to .Z3D file to be read in
======================== ================================ =================
Attributes Description Default Value
======================== ================================ =================
_block_len length of data block to read in 65536
as chunks faster reading
_counts_to_mv_conversion conversion factor to convert 9.53674316406e-10
counts to mv
_gps_bytes number of 32-bit words in a gps stamp 16
_gps_dtype data type for a gps stamp see below
_gps_epoch starting date of GPS time
format is a tuple (1980, 1, 6, 0,
0, 0, -1, -1, 0)
_gps_f0 first gps flag in raw binary
_gps_f1 second gps flag in raw binary
_gps_flag_0 first gps flag as an int32 2147483647
_gps_flag_1 second gps flag as an int32 -2147483648
_gps_stamp_length byte length of gps stamp 64
_leap_seconds leap seconds, difference 18
between UTC time and GPS
time. GPS time is ahead
by this much
_week_len week length in seconds 604800
df sampling rate of the data 256
fn Z3D file name None
gps_flag full gps flag _gps_f0+_gps_f1
gps_stamps np.ndarray of gps stamps None
header Z3DHeader object Z3DHeader
metadata Z3DMetadata Z3DMetadata
schedule Z3DSchedule Z3DSchedule
time_series np.ndarray(len_data) None
units units in which the data is in counts
zen_schedule time when zen was set to None
run
======================== ================================ =================
* gps_dtype is formatted as np.dtype([('flag0', np.int32),
('flag1', np.int32),
('time', np.int32),
('lat', np.float64),
('lon', np.float64),
('num_sat', np.int32),
('gps_sens', np.int32),
('temperature', np.float32),
('voltage', np.float32),
('num_fpga', np.int32),
('num_adc', np.int32),
('pps_count', np.int32),
('dac_tune', np.int32),
('block_len', np.int32)])
============================ ==============================================
Methods Description
============================ ==============================================
apply_adaptive_notch_filter apply a notch filter to the data, usually
to remove 60 Hz noise and harmonics
get_gps_time converts the gps counts to relative epoch
seconds according to gps week.
get_UTC_date_time converts gps seconds into the actual date and
time in UTC. Note this is different than GPS
time which is how the zen is scheduled, so
the time will be off by the current amount of
leap seconds.
plot_time_series make a generic plot of the time series
plot_spectra plot the spectra on log-log scales.
plot_spectrogram plot the spectrogram of the data.
read_z3d read a Z3D file making sure all the time stamps
are correctly spaced. Returned time series
starts at the first stamp which has the
correct amount of data points between it and
the next time stamp. Note there are usually
a few seconds at the end and maybe beginning
that aren't correct because the internal
computer is busy switching sampling rate.
read_header read just the header data from the Z3D file
read_metadata read just the metadata from the Z3D file
read_schedule read just the schedule info from the Z3D file
validate_gps_time make sure each time stamp is 1 second apart
validate_time_blocks make sure that the size of each time block
between stamps is equal to the sampling rate
write_ascii_mt_file write an mtpy ascii file of the data
============================ ==============================================
Example
----------------
>>> import mtpy.usgs.zen as zen
>>> zt = zen.Zen3D(r"/home/mt/mt00/mt00_20150522_080000_256_EX.Z3D")
>>> zt.read_z3d()
>>> ------- Reading /home/mt/mt00/mt00_20150522_080000_256_EX.Z3D -----
--> Reading data took: 0.322 seconds
Scheduled time was 2015-05-22,08:00:16 (GPS time)
1st good stamp was 2015-05-22,08:00:18 (GPS time)
difference of 2.00 seconds
found 6418 GPS time stamps
found 1642752 data points
>>> zt.plot_time_series()
"""
def __init__(self, fn=None, **kwargs):
self.fn = fn
self.header = Z3DHeader(fn)
self.schedule = Z3DSchedule(fn)
self.metadata = Z3DMetadata(fn)
self._gps_stamp_length = kwargs.pop('stamp_len', 64)
self._gps_bytes = self._gps_stamp_length/4
self.gps_stamps = None
self._gps_flag_0 = np.int32(2147483647)
self._gps_flag_1 = np.int32(-2147483648)
self._gps_f0 = self._gps_flag_0.tostring()
self._gps_f1 = self._gps_flag_1.tostring()
self.gps_flag = self._gps_f0 + self._gps_f1
self._gps_dtype = np.dtype([('flag0', np.int32),
('flag1', np.int32),
('time', np.int32),
('lat', np.float64),
('lon', np.float64),
('num_sat', np.int32),
('gps_sens', np.int32),
('temperature', np.float32),
('voltage', np.float32),
('num_fpga', np.int32),
('num_adc', np.int32),
('pps_count', np.int32),
('dac_tune', np.int32),
('block_len', np.int32)])
self._week_len = 604800
self._gps_epoch = (1980, 1, 6, 0, 0, 0, -1, -1, 0)
self._leap_seconds = 18
self._block_len = 2**16
# the number in the cac files is for volts, we want mV
self._counts_to_mv_conversion = 9.5367431640625e-10 * 1E3
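# note (added): 9.5367431640625e-10 is exactly 2**-20 / 1000, i.e. the raw
# factor is in volts per count and the 1E3 above converts the result to mV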
self.num_sec_to_skip = 3
self.units = 'counts'
self.df = None
self.ts_obj = mtts.MTTS()
@property
def station(self):
"""
station name
"""
return self.metadata.station
@station.setter
def station(self, station):
"""
station name
"""
self.metadata.station = station
@property
def dipole_len(self):
"""
dipole length
"""
if self.metadata.ch_length is not None:
return self.metadata.ch_length
elif hasattr(self.metadata, 'ch_offset_xyz1'):
# only ex and ey have xyz2
if hasattr(self.metadata, 'ch_offset_xyz2'):
x1, y1, z1 = [float(offset) for offset in
self.metadata.ch_offset_xyz1.split(':')]
x2, y2, z2 = [float(offset) for offset in
self.metadata.ch_offset_xyz2.split(':')]
length = np.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
return np.round(length, 2)
else:
return 0
elif self.metadata.ch_xyz1 is not None:
x1, y1 = [float(d) for d in self.metadata.ch_xyz1.split(':')]
x2, y2 = [float(d) for d in self.metadata.ch_xyz2.split(':')]
length = np.sqrt((x2-x1)**2+(y2-y1)**2)*100.
return np.round(length, 2)
@property
def azimuth(self):
"""
azimuth of instrument setup
"""
if self.metadata.ch_azimuth is not None:
return float(self.metadata.ch_azimuth)
elif self.metadata.rx_xazimuth is not None:
return float(self.metadata.rx_xazimuth)
else:
return None
@property
def component(self):
"""
channel
"""
return self.metadata.ch_cmp.lower()
@property
def lat(self):
"""
latitude in decimal degrees
"""
return self.header.lat
@property
def lon(self):
"""
longitude in decimal degrees
"""
return self.header.long
@property
def elev(self):
"""
elevation in meters
"""
return self.header.alt
@property
def df(self):
"""
sampling rate
"""
return self.header.ad_rate
@df.setter
def df(self, sampling_rate):
"""
sampling rate
"""
if sampling_rate is not None:
self.header.ad_rate = float(sampling_rate)
@property
def zen_schedule(self):
"""
zen schedule date and time
"""
if self.header.old_version is True:
dt_str = self.header.schedule.replace('T', ',')
self.schedule.Date = dt_str.split(',')[0]
self.schedule.Time = dt_str.split(',')[1]
# the first good GPS stamp is on the 3rd, so need to add 2 seconds
self.schedule.Time = '{0}{1:02}'.format(self.schedule.Time[0:6],
int(self.schedule.Time[6:])+2)
self.schedule.datetime = datetime.datetime.strptime('{0},{1}'.format(self.schedule.Date,
self.schedule.Time),
datetime_fmt)
return self.schedule.datetime
@zen_schedule.setter
def zen_schedule(self, schedule_dt):
"""
on setting set schedule datetime
"""
if type(schedule_dt) is not datetime.datetime:
raise TypeError('New schedule datetime must be type datetime.datetime')
self.schedule.datetime = schedule_dt
# set the leap seconds
self._leap_seconds = calculate_leap_seconds(self.schedule.datetime.year,
self.schedule.datetime.month,
self.schedule.datetime.day)
@property
def coil_num(self):
"""
coil number
"""
if self.metadata.cal_ant is not None:
return self.metadata.cal_ant
elif self.metadata.ch_number is not None:
return self.metadata.ch_number
else:
return None
def _get_gps_stamp_type(self, old_version=False):
"""
get the correct stamp type.
In older versions the stamp length was 36 bytes;
newer versions have a 64-byte stamp
"""
if old_version is True:
self._gps_dtype = np.dtype([('gps', np.int32),
('time', np.int32),
('lat', np.float64),
('lon', np.float64),
('block_len', np.int32),
('gps_accuracy', np.int32),
('temperature', np.float32)])
self._gps_stamp_length = 36
self._gps_bytes = self._gps_stamp_length/4
self._gps_flag_0 = -1
self._block_len = int(self._gps_stamp_length+self.df*4)
self.gps_flag = self._gps_f0
else:
return
#======================================
def _read_header(self, fn=None, fid=None):
"""
read header information from Z3D file
Arguments
---------------
**fn** : string
full path to Z3D file to read
**fid** : file object
if the file is open give the file id object
Outputs:
----------
* fills the Zen3ZD.header object's attributes
Example with just a file name
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_header(fn)
Example with file object
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dfid = open(fn, 'rb')
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_header(fid=Z3Dfid)
"""
if fn is not None:
self.fn = fn
self.header.read_header(fn=self.fn, fid=fid)
if self.header.old_version:
if self.header.box_number is None:
self.header.box_number = '6666'
#======================================
def _read_schedule(self, fn=None, fid=None):
"""
read schedule information from Z3D file
Arguments
---------------
**fn** : string
full path to Z3D file to read
**fid** : file object
if the file is open give the file id object
Outputs:
----------
* fills the Zen3ZD.schedule object's attributes
Example with just a file name
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_schedule(fn)
Example with file object
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dfid = open(fn, 'rb')
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_schedule(fid=Z3Dfid)
"""
if fn is not None:
self.fn = fn
self.schedule.read_schedule(fn=self.fn, fid=fid)
if self.header.old_version:
dt_str = self.header.schedule.replace('T', ',')
self.schedule.Date = dt_str.split(',')[0]
self.schedule.Time = dt_str.split(',')[1]
year, month, day = [int(dd) for dd in self.schedule.Date.split('-')]
hour, minute, second = [int(dd) for dd in self.schedule.Time.split(':')]
self.schedule.datetime = datetime.datetime(year, month, day,
hour, minute, second)
# set the leap seconds
self._leap_seconds = calculate_leap_seconds(self.schedule.datetime.year,
self.schedule.datetime.month,
self.schedule.datetime.day)
#======================================
def _read_metadata(self, fn=None, fid=None):
"""
read header information from Z3D file
Arguments
---------------
**fn** : string
full path to Z3D file to read
**fid** : file object
if the file is open give the file id object
Outputs:
----------
* fills the Zen3ZD.metadata object's attributes
Example with just a file name
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_metadata(fn)
Example with file object
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dfid = open(fn, 'rb')
>>> Z3Dobj = zen.Zen3D()
>>> Z3Dobj.read_metadata(fid=Z3Dfid)
"""
if fn is not None:
self.fn = fn
if self.header.old_version:
self.metadata._schedule_metadata_len = 0
self.metadata.read_metadata(fn=self.fn, fid=fid)
#=====================================
def read_all_info(self):
"""
Read header, schedule, and metadata
"""
with open(self.fn, 'rb') as file_id:
self._read_header(fid=file_id)
self._read_schedule(fid=file_id)
self._read_metadata(fid=file_id)
#======================================
def read_z3d(self, Z3Dfn=None):
"""
read in z3d file and populate attributes accordingly
read in the entire file as if everything but header and metadata are
np.int32, then extract the gps stamps and convert accordingly
Checks to make sure gps time stamps are 1 second apart and incrementing
as well as checking the number of data points between stamps is the
same as the sampling rate.
Converts gps_stamps['time'] to seconds relative to header.gps_week
We skip the first two gps stamps because there is something wrong with
the data there due to some type of buffering.
Therefore the first GPS time is when the time series starts, so you
will notice that gps_stamps[0]['block_len'] = 0, this is because there
is nothing previous to this time stamp and so the 'block_len' measures
backwards from the corresponding time index.
"""
if Z3Dfn is not None:
self.fn = Z3Dfn
#print(u'------- Reading {0} ---------'.format(self.fn))
st = time.time()
#get the file size to get an estimate of how many data points there are
file_size = os.path.getsize(self.fn)
# using the with statement works in Python versions 2.7 or higher
# the added benefit of the with statement is that it will close the
# file object upon reading completion.
with open(self.fn, 'rb') as file_id:
self._read_header(fid=file_id)
self._read_schedule(fid=file_id)
self._read_metadata(fid=file_id)
if self.header.old_version is True:
self._get_gps_stamp_type(True)
# move the read value to where the end of the metadata is
file_id.seek(self.metadata.m_tell)
# initialize a data array filled with zeros, everything goes into
# this array then we parse later
data = np.zeros(int((file_size-512*(1+self.metadata.count))/4 + 8*self.df),
dtype=np.int32)
# go over a while loop until the data count exceeds the file size
data_count = 0
while True:
# need to make sure the last block read is a multiple of 32 bit
read_len = min([self._block_len,
int(32*((file_size-file_id.tell())//32))])
test_str = np.fromstring(file_id.read(read_len),
dtype=np.int32)
if len(test_str) == 0:
break
data[data_count:data_count+len(test_str)] = test_str
data_count += test_str.size
self.raw_data = data.copy()
# find the gps stamps
gps_stamp_find = self.get_gps_stamp_index(data, self.header.old_version)
# skip the first two stamps and trim data
try:
data = data[gps_stamp_find[self.num_sec_to_skip]:]
except IndexError:
raise ZenGPSError("Data is bad, cannot open file {0}".format(self.fn))
# find gps stamps of the trimmed data
gps_stamp_find = self.get_gps_stamp_index(data, self.header.old_version)
self.gps_stamps = np.zeros(len(gps_stamp_find), dtype=self._gps_dtype)
for ii, gps_find in enumerate(gps_stamp_find):
try:
data[gps_find+1]
except IndexError:
print('***Failed gps stamp***')
print(' stamp {0} out of {1}'.format(ii+1,
len(gps_stamp_find)))
break
if self.header.old_version is True or data[gps_find+1] == self._gps_flag_1:
gps_str = struct.pack('<'+'i'*int(self._gps_bytes),
*data[int(gps_find):int(gps_find+self._gps_bytes)])
self.gps_stamps[ii] = np.fromstring(gps_str,
dtype=self._gps_dtype)
if ii > 0:
self.gps_stamps[ii]['block_len'] = gps_find-\
gps_stamp_find[ii-1]-self._gps_bytes
elif ii == 0:
self.gps_stamps[ii]['block_len'] = 0
data[int(gps_find):int(gps_find+self._gps_bytes)] = 0
# fill the time series object
self._fill_ts_obj(data[np.nonzero(data)])
print(' found {0} GPS time stamps'.format(self.gps_stamps.shape[0]))
print(' found {0} data points'.format(self.ts_obj.ts.data.size))
# time it
et = time.time()
print('INFO: --> Reading data took: {0:.3f} seconds'.format(et-st))
#=================================================
def _fill_ts_obj(self, ts_data):
"""
fill time series object
"""
# fill the time series object
self.ts_obj = mtts.MTTS()
self.ts_obj.ts = ts_data
# convert data to mV
self.convert_counts_to_mv()
self.ts_obj.ts = self.ts_obj.ts.astype(np.float32)
self.validate_time_blocks()
self.convert_gps_time()
self.zen_schedule = self.check_start_time()
# fill time series object metadata
self.ts_obj.station = self.station
self.ts_obj.sampling_rate = float(self.df)
self.ts_obj.start_time_utc = self.zen_schedule.isoformat()
self.ts_obj.component = self.component
self.ts_obj.coordinate_system = 'geomagnetic'
try:
self.ts_obj.dipole_length = float(self.dipole_len)
except TypeError:
self.ts_obj.dipole_length = -666
try:
self.ts_obj.azimuth = float(self.azimuth)
except TypeError:
self.ts_obj.azimuth = -666
self.ts_obj.units = 'mV'
self.ts_obj.lat = self.lat
self.ts_obj.lon = self.lon
self.ts_obj.datum = 'WGS84'
self.ts_obj.data_logger = self.header.data_logger
self.ts_obj.elev = self.elev
self.ts_obj.instrument_id = self.coil_num
self.ts_obj.calibration_fn = None
self.ts_obj.declination = 0.0
self.ts_obj.conversion = self._counts_to_mv_conversion
self.ts_obj.gain = self.header.ad_gain
self.ts_obj.channel_number = int(self.header.channel)
self.ts_obj.fn = os.path.basename(self.fn)
# =================================================
def get_gps_stamp_index(self, ts_data, old_version=False):
"""
locate the time stamps in a given time series.
Looks for gps_flag_0 first, if the file is newer, then makes sure the
next value is gps_flag_1
:returns: list of gps stamp indices
"""
# find the gps stamps
gps_stamp_find = np.where(ts_data == self._gps_flag_0)[0]
if old_version is False:
gps_stamp_find = [gps_find for gps_find in gps_stamp_find
if ts_data[gps_find+1] == self._gps_flag_1]
return gps_stamp_find
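# Hedged sketch (synthetic data assumed for illustration, not from the
# original file): the stamp search can be exercised on a small int32 array,
# >>> z = Zen3D()
# >>> fake = np.array([0, 1, z._gps_flag_0, z._gps_flag_1, 5], dtype=np.int32)
# >>> z.get_gps_stamp_index(fake)
# [2]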
#=================================================
def trim_data(self):
"""
apparently need to skip the first 3 seconds of data because of
something to do with the SD buffer
This method will be deprecated after field testing
"""
# the block length is the number of data points before the time stamp
# therefore the first block length is 0. The indexing in python
# goes to the last index - 1 so we need to put in 3
ts_skip = self.gps_stamps['block_len'][0:3].sum()
self.gps_stamps = self.gps_stamps[2:]
self.gps_stamps[0]['block_len'] = 0
self.time_series = self.time_series[ts_skip:]
#=================================================
def check_start_time(self):
"""
check to make sure the scheduled start time is similar to
the first good gps stamp
"""
# make sure the time is in gps time
zen_start_utc = self.get_UTC_date_time(self.header.gpsweek,
self.gps_stamps['time'][0])
# calculate the scheduled start time
s_start = '{0},{1}'.format(self.schedule.Date, self.schedule.Time)
schedule_time = datetime.datetime.strptime(s_start, datetime_fmt)
# reset the date and time in the schedule meta data so there is no
# confusion on when the time series starts
self.schedule.Date = zen_start_utc.strftime(u'%Y-%m-%d')
self.schedule.Time = zen_start_utc.strftime(u'%H:%M:%S')
# estimate the time difference between the two
time_diff = self.zen_schedule - schedule_time
print(' Scheduled time was {0} (GPS time)'.format(s_start))
print(' 1st good stamp was {0} (UTC time)'.format(zen_start_utc.isoformat()))
print(' difference of {0:.2f} seconds'.format(time_diff.total_seconds()))
return zen_start_utc
#==================================================
def validate_gps_time(self):
"""
make sure each time stamp is 1 second apart
"""
t_diff = np.zeros_like(self.gps_stamps['time'])
for ii in range(len(t_diff)-1):
t_diff[ii] = self.gps_stamps['time'][ii]-self.gps_stamps['time'][ii+1]
bad_times = np.where(abs(t_diff) > 0.5)[0]
if len(bad_times) > 0:
print('-'*50)
for bb in bad_times:
print('WARNING: bad time at index {0} > 0.5 s'.format(bb))
#===================================================
def validate_time_blocks(self):
"""
validate gps time stamps and make sure each block is the proper length
"""
# first check if the gps stamp blocks are of the correct length
bad_blocks = np.where(self.gps_stamps['block_len'][1:] !=
self.header.ad_rate)[0]
if len(bad_blocks) > 0:
if bad_blocks.max() < 5:
ts_skip = self.gps_stamps['block_len'][0:bad_blocks[-1]+1].sum()
self.gps_stamps = self.gps_stamps[bad_blocks[-1]:]
self.time_series = self.time_series[ts_skip:]
print('WARNING: Skipped the first {0} seconds'.format(
bad_blocks[-1]))
print('WARNING: Skipped first {0} points in time series'.format(
ts_skip))
#==================================================
def convert_gps_time(self):
"""
convert gps time integer to relative seconds from gps_week
"""
# need to convert gps_time to type float from int
dt = self._gps_dtype.descr
if self.header.old_version is True:
dt[1] = ('time', np.float32)
else:
dt[2] = ('time', np.float32)
self.gps_stamps = self.gps_stamps.astype(np.dtype(dt))
# convert to seconds
# these are seconds relative to the gps week
time_conv = self.gps_stamps['time'].copy()/1024.
time_ms = (time_conv-np.floor(time_conv))*1.024
time_conv = np.floor(time_conv)+time_ms
self.gps_stamps['time'][:] = time_conv
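# note (added, assumed interpretation): the raw 'time' field appears to be a
# count of 1/1024-second ticks; dividing by 1024 gives whole seconds and the
# fractional part is rescaled by 1.024 above to recover the millisecond part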
#==================================================
def convert_counts_to_mv(self):
"""
convert the time series from counts to millivolts
"""
self.ts_obj.ts.data *= self._counts_to_mv_conversion
#==================================================
def convert_mv_to_counts(self):
"""
convert millivolts to counts assuming no other scaling has been applied
"""
self.ts_obj.ts.data /= self._counts_to_mv_conversion
#==================================================
def get_gps_time(self, gps_int, gps_week=0):
"""
from the gps integer get the time in seconds.
Arguments
-------------
**gps_int**: int
integer from the gps time stamp line
**gps_week**: int
relative gps week; if the number of seconds is
larger than a week, a week is subtracted from
the seconds and gps_week is incremented by 1
Returns
---------
**gps_time**: int
number of seconds from the beginning of the relative
gps week.
"""
gps_seconds = gps_int/1024.
gps_ms = (gps_seconds-np.floor(gps_int/1024.))*(1.024)
cc = 0
if gps_seconds > self._week_len:
gps_week += 1
cc = gps_week*self._week_len
gps_seconds -= self._week_len
gps_time = np.floor(gps_seconds)+gps_ms+cc
return gps_time, gps_week
#==================================================
def get_UTC_date_time(self, gps_week, gps_time):
"""
get the actual date and time of measurement as UTC.
.. note:: GPS time is currently ahead by 18 (after 2016) seconds from
UTC time.
Arguments
-------------
**gps_week**: int
integer value of gps_week that the data was collected
**gps_time**: int
number of seconds from beginning of gps_week
**leap_seconds**: int
number of seconds gps time is off from UTC time.
It is currently off by 18 seconds (after 2016).
Returns
------------
**date_time**: YYYY-MM-DD,HH:MM:SS
formatted date and time from gps seconds.
"""
# need to check to see if the time in seconds is more than a gps week
# if it is add 1 to the gps week and reduce the gps time by a week
if gps_time > self._week_len:
gps_week += 1
gps_time -= self._week_len
#mseconds = gps_time % 1
#make epoch in seconds, mktime computes local time, need to subtract
#time zone to get UTC
epoch_seconds = time.mktime(self._gps_epoch)-time.timezone
# gps time is 18 seconds ahead of UTC time
utc_seconds = epoch_seconds+(gps_week*self._week_len)+gps_time-\
self._leap_seconds
# compute date and time from seconds and return a datetime object
# easier to manipulate later
date_time = datetime.datetime.utcfromtimestamp(utc_seconds)
return date_time
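# Worked sketch of the conversion above (added for illustration): with
# gps_week = 0, gps_time = 0 and zero leap seconds, utc_seconds corresponds to
# the GPS epoch 1980-01-06 00:00:00 UTC; each extra week adds 604800 s and the
# leap seconds are subtracted at the end because GPS time runs ahead of UTC.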
#==================================================
def apply_adaptive_notch_filter(self, notch_dict={'notches':np.arange(60, 1860, 60),
'notch_radius':0.5,
'freq_rad':0.5,
'rp':0.1}):
"""
apply notch filter to the data that finds the peak around each
frequency.
see mtpy.processing.filter.adaptive_notch_filter
Arguments
-------------
**notch_dict** : dictionary
dictionary of filter parameters.
if an empty dictionary is input the filter looks
for 60 Hz and harmonics to filter out.
"""
try:
notch_dict['notches']
except KeyError:
return
try:
self.ts_obj.ts.data
except AttributeError:
self.read_z3d()
self.ts_obj.ts = self.ts_obj.apply_addaptive_notch_filter(**notch_dict)
#==================================================
def write_ascii_mt_file(self, save_fn=None, fmt='%.8e', notch_dict=None,
dec=1):
"""
write an mtpy time series data file
Arguments
-------------
**save_fn** : full path to save file, if None file is saved as:
station_YYYYMMDD_hhmmss_df.component
ex. mt01_20130206_120000_256.HX
**fmt** : string format
format of data numbers output to ascii file.
*default* is '%.8e' for 8 significant figures in
scientific notation.
**ex** : float
scaling parameter of ex line, the length of the dipole
be careful to not scale when creating an .edi file
*default* is 1
**ey** : float
scaling parameter of ey line, the length of the dipole
be careful to not scale when creating an .edi file
*default* is 1
**notch_dict** : dictionary
dictionary of notch filter parameters
*default* is None
if an empty dictionary is input then the
filter looks for 60 Hz and harmonics to filter
**dec** : int
decimation factor
*default* is 1
Output
-------------
**fn_mt_ascii** : full path to saved file
Example
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> Z3Dobj = zen.Zen3D(fn)
>>> asc_fn = Z3Dobj.write_ascii_mt_file(save_station='mt', notch_dict={})
"""
if self.station is None:
self.read_all_info()
if dec > 1:
print('INFO: Decimating data by factor of {0}'.format(dec))
self.df = self.df/dec
# make a new file name to save to that includes the meta information
if save_fn is None:
svfn_directory = os.path.join(os.path.dirname(self.fn), 'TS')
if not os.path.exists(svfn_directory):
os.mkdir(svfn_directory)
svfn_date = ''.join(self.schedule.Date.split('-'))
svfn_time = ''.join(self.schedule.Time.split(':'))
self.fn_mt_ascii = os.path.join(svfn_directory,
'{0}_{1}_{2}_{3}.{4}'.format(self.station,
svfn_date,
svfn_time,
int(self.df),
self.metadata.ch_cmp.upper()))
else:
self.fn_mt_ascii = save_fn
# if the file already exists skip it
if os.path.isfile(self.fn_mt_ascii) == True:
print('\t************')
print('\tmtpy file already exists for {0} --> {1}'.format(self.fn,
self.fn_mt_ascii))
print('\tskipping')
print('\t************')
# if there is a decimation factor need to read in the time
# series data to get the length.
c = self.ts_obj.read_ascii_header(self.fn_mt_ascii)
self.zen_schedule = dateutil.parser.parse(self.ts_obj.start_time_utc)
return
# read in time series data if haven't yet.
if len(self.ts_obj.ts) <=1:
self.read_z3d()
# decimate the data. try resample at first, see how that goes
# make the attribute time series equal to the decimated data.
if dec > 1:
self.ts_obj.decimate(dec)
# apply notch filter if desired
if notch_dict is not None:
self.apply_adaptive_notch_filter(notch_dict)
# convert counts to mV and scale accordingly
# self.convert_counts() #--> data is already converted to mV
# calibrate electric channels should be in mV/km
if self.component in ['ex', 'ey']:
e_scale = float(self.dipole_len)
self.ts_obj.ts.data /= e_scale/1000.
print('INFO: Using scales {0} = {1} m'.format(self.metadata.ch_cmp.upper(),
e_scale))
self.ts_obj.units = 'mV/km'
self.ts_obj.write_ascii_file(fn_ascii=self.fn_mt_ascii)
print('INFO: Wrote mtpy timeseries file to {0}'.format(self.fn_mt_ascii))
#==================================================
def plot_time_series(self, fig_num=1):
"""
plots the time series
"""
self.ts_obj.ts.plot(x_compat=True)
#==================================================
def plot_spectrogram(self, time_window=2**8, time_step=2**6, s_window=11,
frequency_window=1, n_freq_bins=2**9, sigma_L=None):
"""
plot the spectrogram of the data using the S-method
Arguments:
-----------
**s_window** : int (should be odd)
length of window for S-method calculation, higher numbers tend
toward WVD
**time_window** : int (should be power of 2)
window length for each time step
*default* is 2**8 = 256
**frequency_window** : int (should be odd)
length of smoothing window along frequency plane
**time_step** : int
number of samples between short windows
*default* is 2**6 = 64
**sigma_L** : float
full width half max of gaussian window for L
**n_freq_bins** : int
(should be power of 2 and equal or larger than nh)
number of frequency bins
Returns:
---------
**ptf** : mtpy.imaging.plotspectrogram.PlotTF object
"""
kwargs = {'nh':time_window, 'tstep':time_step, 'L':s_window,
'ng':frequency_window, 'df':self.df, 'nfbins':n_freq_bins,
'sigmaL': sigma_L}
ptf = plotspectrogram.PlotTF(self.ts_obj.ts.data.to_numpy(), **kwargs)
return ptf
#==================================================
def plot_spectra(self, fig_num=2):
"""
plot the spectra of time series
"""
self.ts_obj.plot_spectra(fig_num=fig_num)
#==============================================================================
# read and write a zen schedule
#==============================================================================
class ZenSchedule(object):
"""
deals with reading, writing and copying schedule
Creates a repeating schedule based on the master_schedule. It will
then change the first scheduling action to coincide with the master
schedule, such that all deployed boxes will have the same schedule.
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> zs = zen.ZenSchedule()
>>> zs.write_schedule('MT01', dt_offset='2013-06-23,04:00:00')
====================== ====================================================
Attributes Description
====================== ====================================================
ch_cmp_dict dictionary for channel components with keys being
the channel number and values being the channel
label
ch_num_dict dictionary for channel components with keys
being channel label and values being channel number
df_list sequential list of sampling rates to repeat in
schedule
df_time_list sequential list of time intervals to measure for
each corresponding sampling rate
dt_format date and time format. *default* is
YYYY-MM-DD,hh:mm:ss
dt_offset start date and time of schedule in dt_format
gain_dict dictionary of gain values for channel number
initial_dt initial date, or dummy zero date for scheduling
light_dict dictionary of light color values for schedule
master_schedule the schedule that all data loggers should schedule
at. Will tailor the schedule to match the master
schedule according to dt_offset
meta_dict dictionary for meta data
meta_keys keys for meta data dictionary
sa_keys keys for schedule actions
sa_list list of schedule actions including time and df
sr_dict dictionary of sampling rate values
verbose [ True | False ] True to print information to
console
====================== ====================================================
"""
def __init__(self):
self.verbose = True
self.sr_dict = {'256':'0', '512':'1', '1024':'2', '2048':'3',
'4096':'4'}
self.gain_dict = dict([(mm, 2**mm) for mm in range(7)])
self.sa_keys = ['date', 'time', 'resync_yn', 'log_yn', 'tx_duty',
'tx_period', 'sr', 'gain', 'nf_yn']
self.sa_list = []
self.ch_cmp_dict = {'1':'hx', '2':'hy', '3':'hz', '4':'ex', '5':'ey',
'6':'hz'}
self.ch_num_dict = dict([(self.ch_cmp_dict[key], key)
for key in self.ch_cmp_dict])
self.meta_keys = ['TX.ID', 'RX.STN', 'Ch.Cmp', 'Ch.Number',
'Ch.varAsp']
self.meta_dict = {'TX.ID':'none', 'RX.STN':'01', 'Ch.Cmp':'HX',
'Ch.Number':'1', 'Ch.varAsp':50}
self.light_dict = {'YellowLight':0,
'BlueLight':1,
'RedLight':0,
'GreenLight':1}
self.dt_format = datetime_fmt
self.initial_dt = '2000-01-01,00:00:00'
self.dt_offset = time.strftime(datetime_fmt ,time.gmtime())
self.df_list = (4096, 256)
self.df_time_list = ('00:10:00','07:50:00')
self.master_schedule = self.make_schedule(self.df_list,
self.df_time_list,
repeat=16)
self._resync_pause = 20
#==================================================
def read_schedule(self, fn):
"""
read zen schedule file
"""
sfid = open(fn, 'r')
lines = sfid.readlines()
for line in lines:
if line.find('scheduleaction') == 0:
line_list = line.strip().split(' ')[1].split(',')
sa_dict = {}
for ii, key in enumerate(self.sa_keys):
sa_dict[key] = line_list[ii]
self.sa_list.append(sa_dict)
elif line.find('metadata'.upper()) == 0:
line_list = line.strip().split(' ')[1].split('|')
for md in line_list[:-1]:
md_list = md.strip().split(',')
self.meta_dict[md_list[0]] = md_list[1]
elif line.find('offset') == 0:
line_str = line.strip().split(' ')
self.offset = line_str[1]
elif line.find('Light') > 0:
line_list = line.strip().split(' ')
try:
self.light_dict[line_list[0]]
self.light_dict[line_list[0]] = line_list[1]
except KeyError:
pass
#==================================================
def add_time(self, date_time, add_minutes=0, add_seconds=0, add_hours=0,
add_days=0):
"""
add time to a time string
assuming date_time is in the format YYYY-MM-DD,HH:MM:SS
"""
fulldate = datetime.datetime.strptime(date_time, self.dt_format)
fulldate = fulldate + datetime.timedelta(days=add_days,
hours=add_hours,
minutes=add_minutes,
seconds=add_seconds)
return fulldate
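# Illustrative sketch (not part of the original module): add_time wraps
# datetime.timedelta, so offsets roll over midnight correctly.  Assuming the
# module-level datetime_fmt is '%Y-%m-%d,%H:%M:%S':
#   >>> zs = ZenSchedule()
#   >>> zs.add_time('2013-06-23,23:50:00', add_minutes=20)
#   datetime.datetime(2013, 6, 24, 0, 10)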
#==================================================
def make_schedule(self, df_list, df_length_list, repeat=5, t1_dict=None):
"""
make a repeated schedule given list of sampling frequencies and
duration for each.
Arguments:
-----------
**df_list** : list
list of sampling frequencies in Hz, note needs to be
powers of 2 starting at 256
**df_length_list** : list
list of durations in hh:mm:ss format
**repeat** : int
number of times to repeat the sequence
**t1_dict** : dictionary
dictionary returned from get_schedule_offset
Returns:
--------
**time_list**: list of dictionaries with keys:
* 'dt' --> date and time of schedule event
* 'df' --> sampling rate for that event
"""
df_list = np.array(df_list)
df_length_list = np.array(df_length_list)
ndf = len(df_list)
if t1_dict is not None:
time_list = [{'dt':self.initial_dt, 'df':t1_dict['df']}]
kk = np.where(np.array(df_list) == t1_dict['df'])[0][0]-ndf+1
df_list = np.append(df_list[kk:], df_list[:kk])
df_length_list = np.append(df_length_list[kk:], df_length_list[:kk])
time_list.append(dict([('dt', t1_dict['dt']), ('df', df_list[0])]))
ii = 1
else:
time_list = [{'dt':self.initial_dt, 'df':df_list[0]}]
ii = 0
for rr in range(1, repeat+1):
for df, df_length, jj in zip(df_list, df_length_list, range(ndf)):
dtime = time.strptime(df_length, '%H:%M:%S')
ndt = self.add_time(time_list[ii]['dt'],
add_hours=dtime.tm_hour,
add_minutes=dtime.tm_min,
add_seconds=dtime.tm_sec)
time_list.append({'dt':ndt.strftime(self.dt_format),
'df':df_list[jj-ndf+1]})
ii += 1
for nn, ns in enumerate(time_list):
sdate, stime = ns['dt'].split(',')
ns['date'] = sdate
ns['time'] = stime
ns['log_yn'] = 'Y'
ns['nf_yn'] = 'Y'
ns['sr'] = self.sr_dict[str(ns['df'])]
ns['tx_duty'] = '0'
ns['tx_period'] = '0'
ns['resync_yn'] = 'Y'
ns['gain'] = '0'
return time_list
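# Illustrative sketch (assumption, not original code): every entry returned by
# make_schedule already carries the keys needed to build a 'scheduleaction'
# line.  With the default two-rate cycle this looks like:
#   >>> zs = ZenSchedule()
#   >>> sched = zs.make_schedule((4096, 256), ('00:10:00', '07:50:00'), repeat=2)
#   >>> sched[1]['time'], sched[1]['sr']
#   ('00:10:00', '0')    # the 256 Hz window starts 10 minutes into the cycle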
#==================================================
def get_schedule_offset(self, time_offset, schedule_time_list):
"""
gets the offset in time between time_offset and the master schedule so
that all schedules will record at the same time, according to the
master schedule given by schedule_time_list
Attributes:
-----------
**time_offset** : hh:mm:ss
the time offset given to the zen receiver
**schedule_time_list** : list
list of actual schedule times returned
from make_schedule
Returns:
--------
**s1** : dictionary
dictionary with keys:
* 'dt' --> date and time of offset from next schedule
event from schedule_time_list
* 'df' --> sampling rate of that event
"""
dt_offset = '{0},{1}'.format('2000-01-01', time_offset)
t0 = time.mktime(time.strptime('2000-01-01,00:00:00', self.dt_format))
for ii, tt in enumerate(schedule_time_list):
ssec = time.mktime(time.strptime(tt['dt'], self.dt_format))
osec = time.mktime(time.strptime(dt_offset, self.dt_format))
if ssec > osec:
sdiff = time.localtime(t0+(ssec-osec))
t1 = self.add_time('2000-01-01,00:00:00',
add_hours=sdiff.tm_hour,
add_minutes=sdiff.tm_min,
add_seconds=sdiff.tm_sec)
s1 = {'dt':t1.strftime(self.dt_format),
'df':schedule_time_list[ii-1]['df']}
return s1
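# Illustrative sketch (assumption): get_schedule_offset returns the gap between
# the requested start time and the next master-schedule event, so every box
# snaps onto the same master grid.  With the default 10 min / 7 h 50 min cycle
# (events at 00:00, 00:10, 08:00, ...), a start time of 04:05:00 waits until
# the 08:00:00 event:
#   >>> zs = ZenSchedule()
#   >>> s1 = zs.get_schedule_offset('04:05:00', zs.master_schedule)
#   >>> s1['dt']
#   '2000-01-01,03:55:00'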
#==================================================
def write_schedule(self, station, clear_schedule=True,
clear_metadata=True, varaspace=100,
savename=0, dt_offset=None,
df_list=None,
df_time_list=None,
repeat=8, gain=0):
"""
write a zen schedule file
**Note**: for the older boxes use 'Zeus3Ini.cfg' for the savename
Arguments:
----------
**station** : int
station name must be an integer for the Zen, can
be changed later
**clear_schedule** : [ True | False ]
write the line clearschedule in .cfg file
**clear_metadata** : [ True | False ]
write the line metadata clear in .cfg file
**varaspace** : electrode spacing in meters, can be changed later
**savename** : [ 0 | 1 | 2 | string]
* 0 --> saves as zenini.cfg
* 1 --> saves as Zeus3Ini.cfg
* 2 --> saves as ZEN.cfg
* string --> saves as the string, note the zen
boxes look for either 0 or 1, so this
option is useless
**dt_offset** : YYYY-MM-DD,hh:mm:ss
date and time offset to start the scheduling.
if this is none then current time on computer is
used. **In UTC Time**
**Note**: this will shift the starting point to
match the master schedule, so that all
stations have the same schedule.
**df_list** : list
list of sampling rates in Hz
**df_time_list** : list
list of time intervals corresponding to df_list
in hh:mm:ss format
**repeat** : int
number of times to repeat the cycle of df_list
**gain** : int
gain on instrument, 2 raised to this number.
Returns:
--------
* writes .cfg files to any connected SD card according to channel
number and ch_num_dict
"""
if dt_offset is not None:
self.dt_offset = dt_offset
s1_dict = self.get_schedule_offset(self.dt_offset.split(',')[1],
self.master_schedule)
if df_list is not None:
self.df_list = df_list
if df_time_list is not None:
self.df_time_list = df_time_list
self.master_schedule = self.make_schedule(self.df_list,
self.df_time_list,
repeat=repeat*3)
self.sa_list = self.make_schedule(self.df_list,
self.df_time_list,
t1_dict=s1_dict, repeat=repeat)
drive_names = get_drive_names()
self.meta_dict['RX.STN'] = station
self.meta_dict['Ch.varAsp'] = '{0}'.format(varaspace)
if savename == 0:
save_name = 'zenini.cfg'
elif savename == 1:
save_name = 'Zeus3Ini.cfg'
elif savename == 2:
save_name = 'ZEN.cfg'
sfid = open(os.path.normpath(os.path.join('c:\\MT', save_name)),
'w')
for sa_dict in self.sa_list:
new_time = self.add_time(self.dt_offset,
add_hours=int(sa_dict['time'][0:2]),
add_minutes=int(sa_dict['time'][3:5]),
add_seconds=int(sa_dict['time'][6:]))
sa_line = ','.join([new_time.strftime(self.dt_format),
sa_dict['resync_yn'],
sa_dict['log_yn'],
'2047',
'1999999999',
sa_dict['sr'],
'0', '0', '0', 'y', 'n', 'n', 'n'])
sfid.write('scheduleaction '.upper()+sa_line[:-1]+'\n')
meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
for key in self.meta_keys])
sfid.write('METADATA '+meta_line+'\n')
for lkey in list(self.light_dict.keys()):
sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
sfid.close()
# print('Wrote {0}:\{1} to {2} as {3}'.format(dd, save_name, dname,
# self.ch_cmp_dict[dname[-1]]))
for dd in list(drive_names.keys()):
dname = drive_names[dd]
sfid = open(os.path.normpath(os.path.join(dd+':\\', save_name)),
'w')
for sa_dict in self.sa_list:
new_time = self.add_time(self.dt_offset,
add_hours=int(sa_dict['time'][0:2]),
add_minutes=int(sa_dict['time'][3:5]),
add_seconds=int(sa_dict['time'][6:]))
sa_line = ','.join([new_time.strftime(self.dt_format),
sa_dict['resync_yn'],
sa_dict['log_yn'],
'2047',
'1999999999',
sa_dict['sr'],
'0', '0', '0', 'y', 'n', 'n', 'n'])
sfid.write('scheduleaction '.upper()+sa_line[:-1]+'\n')
self.meta_dict['Ch.Cmp'] = self.ch_cmp_dict[dname[-1]]
self.meta_dict['Ch.Number'] = dname[-1]
meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
for key in self.meta_keys])
sfid.write('METADATA '+meta_line+'\n')
for lkey in list(self.light_dict.keys()):
sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
sfid.close()
print('Wrote {0}:\\{1} to {2} as {3}'.format(dd, save_name, dname,
self.ch_cmp_dict[dname[-1]]))
return
else:
save_name = savename
for dd in list(drive_names.keys()):
dname = drive_names[dd]
sfid = open(os.path.normpath(os.path.join(dd+':\\', save_name)),
'w')
if clear_schedule:
sfid.write('clearschedule\n')
if clear_metadata:
sfid.write('metadata clear\n')
for sa_dict in self.sa_list:
if gain != 0:
sa_dict['gain'] = gain
sa_line = ''.join([sa_dict[key]+',' for key in self.sa_keys])
sfid.write('scheduleaction '+sa_line[:-1]+'\n')
sfid.write('offsetschedule {0}\n'.format(self.dt_offset))
self.meta_dict['Ch.Cmp'] = self.ch_cmp_dict[dname[-1]]
self.meta_dict['Ch.Number'] = dname[-1]
meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
for key in self.meta_keys])
sfid.write('METADATA '+meta_line+'\n')
for lkey in list(self.light_dict.keys()):
sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
sfid.close()
print('Wrote {0}:\\{1} to {2} as {3}'.format(dd, save_name, dname,
self.ch_cmp_dict[dname[-1]]))
def write_schedule_for_gui(self, zen_start=None, df_list=None,
df_time_list=None, repeat=8, gain=0,
save_path=None,
schedule_fn='zen_schedule.MTsch'):
"""
write a zen schedule file
**Note**: for the older boxes use 'Zeus3Ini.cfg' for the savename
Arguments:
----------
**zen_start** : hh:mm:ss
start time you want the zen to start collecting
data.
if this is none then current time on computer is
used. **In UTC Time**
**Note**: this will shift the starting point to
match the master schedule, so that all
stations have the same schedule.
**df_list** : list
list of sampling rates in Hz
**df_time_list** : list
list of time intervals corresponding to df_list
in hh:mm:ss format
**repeat** : int
number of times to repeat the cycle of df_list
**gain** : int
gain on instrument, 2 raised to this number.
Returns:
--------
* writes a schedule file to input into the ZenAcq Gui
"""
if df_list is not None:
self.df_list = df_list
if df_time_list is not None:
self.df_time_list = df_time_list
if save_path is None:
save_path = os.getcwd()
# make a master schedule first
self.master_schedule = self.make_schedule(self.df_list,
self.df_time_list,
repeat=repeat*3)
# estimate the first off set time
t_offset_dict = self.get_schedule_offset(zen_start,
self.master_schedule)
# make the schedule with the offset of the first schedule action
self.sa_list = self.make_schedule(self.df_list,
self.df_time_list,
t1_dict=t_offset_dict,
repeat=repeat)
# make a list of lines to write to a file for ZenAcq
zacq_list = []
for ii, ss in enumerate(self.sa_list[:-1]):
t0 = self._convert_time_to_seconds(ss['time'])
t1 = self._convert_time_to_seconds(self.sa_list[ii+1]['time'])
if ss['date'] != self.sa_list[ii+1]['date']:
t1 += 24*3600
# subtract the resync pause (self._resync_pause seconds) to allow for the transition between schedule items.
t_diff = t1-t0-self._resync_pause
zacq_list.append('$schline{0:.0f} = {1:.0f},{2:.0f},{3:.0f}\n'.format(
ii+1,
t_diff,
int(self.sr_dict[str(ss['df'])]),
1))
fn = os.path.join(save_path, schedule_fn)
fid = open(fn, 'w')
fid.writelines(zacq_list[0:16])
fid.close()
print('Wrote schedule file to {0}'.format(fn))
print('+--------------------------------------+')
print('| SET ZEN START TIME TO: {0} |'.format(zen_start))
print('+--------------------------------------+')
def _convert_time_to_seconds(self, time_string):
"""
convert a time string given as hh:mm:ss into seconds
"""
t_list = [float(tt) for tt in time_string.split(':')]
t_seconds = t_list[0] * 3600 + t_list[1] * 60 + t_list[2]
return t_seconds
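# Illustrative sketch (assumption): _convert_time_to_seconds is the helper used
# above to turn hh:mm:ss schedule entries into the per-line durations that
# ZenAcq expects:
#   >>> zs = ZenSchedule()
#   >>> zs._convert_time_to_seconds('07:50:00')
#   28200.0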
#==============================================================================
# Error instances for Zen
#==============================================================================
class ZenGPSError(Exception):
"""
error for gps timing
"""
pass
class ZenSamplingRateError(Exception):
"""
error for different sampling rates
"""
pass
class ZenInputFileError(Exception):
"""
error for input files
"""
pass
#==============================================================================
# get the external drives for SD cards
#==============================================================================
def get_drives():
"""
get a list of logical drives detected on the machine
Note: this only works on Windows.
Outputs:
----------
**drives** : list of drives as letters
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> zen.get_drives()
"""
drives = []
bitmask = win32api.GetLogicalDrives()
for letter in string.ascii_uppercase:
if bitmask & 1:
drives.append(letter)
bitmask >>= 1
return drives
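# Illustrative sketch (assumption): GetLogicalDrives() returns a bitmask with
# bit 0 = drive A, bit 1 = drive B, and so on, which is why the loop above
# shifts the mask right once per letter.  Decoding a hypothetical mask of
# 0b1101 (drives A, C and D present) by hand:
#   >>> import string
#   >>> bitmask = 0b1101
#   >>> [letter for ii, letter in enumerate(string.ascii_uppercase)
#   ...  if bitmask & (1 << ii)]
#   ['A', 'C', 'D']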
#==============================================================================
# get the names of the drives which should correspond to channels
#==============================================================================
def get_drive_names():
"""
get a list of drive names detected assuming the cards are named by box
and channel.
Outputs:
----------
**drive_dict** : dictionary
keys are the drive letters and values are the
drive names
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> zen.get_drive_names()
"""
drives = get_drives()
drive_dict = {}
for drive in drives:
try:
drive_name = win32api.GetVolumeInformation(drive+':\\')[0]
if drive_name.find('CH') > 0:
drive_dict[drive] = drive_name
except Exception:
pass
if drive_dict == {}:
print('No external drives detected, check the connections.')
return None
return drive_dict
def split_station(station):
"""
split station name into name and number
"""
for ii, ss in enumerate(station):
try:
int(ss)
find = ii
break
except ValueError:
continue
name = station[0:find]
number = station[find:]
return (name, number)
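# Illustrative sketch: split_station cuts the station name at the first digit,
# e.g.
#   >>> split_station('mb037')
#   ('mb', '037')
# Note it assumes the name contains at least one digit; a purely alphabetic
# name would raise a NameError because 'find' is never assigned.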
#==============================================================================
# copy files from SD cards
#==============================================================================
def copy_from_sd(station, save_path=r"d:\Peacock\MTData",
channel_dict={'1':'HX', '2':'HY', '3':'HZ',
'4':'EX', '5':'EY', '6':'HZ'},
copy_date=None, copy_type='all'):
"""
copy files from sd cards into a common folder (save_path)
do not put an underscore in the station name; it causes problems at the moment
Arguments:
-----------
**station** : string
full name of station from which data is being saved
**save_path** : string
full path to save data to
**channel_dict** : dictionary
keys are the channel numbers as strings and the
values are the component that corresponds to that
channel, values are placed in upper case in the
code
**copy_date** : YYYY-MM-DD
date to copy from depending on copy_type
**copy_type** : [ 'all' | 'before' | 'after' | 'on' ]
* 'all' --> copy all files on the SD card
* 'before' --> copy files before and on this date
* 'after' --> copy files on and after this date
* 'on' --> copy files on this date only
Outputs:
-----------
**fn_list** : list
list of filenames copied to save_path
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> fn_list = zen.copy_from_sd('mt01', save_path=r"/home/mt/survey_1")
"""
s_name, s_int = split_station(station)
drive_names = get_drive_names()
if drive_names is None:
raise IOError('No drives to copy from.')
save_path = os.path.join(save_path, station)
if not os.path.exists(save_path):
os.mkdir(save_path)
log_fid = open(os.path.join(save_path, 'copy_from_sd.log'), 'w')
# make a datetime object from copy date
if copy_date is not None:
c_date = dateutil.parser.parse(copy_date)
st_test = time.ctime()
fn_list = []
for key in list(drive_names.keys()):
dr = r"{0}:\\".format(key)
print('='*25+drive_names[key]+'='*25)
log_fid.write('='*25+drive_names[key]+'='*25+'\n')
for fn in os.listdir(dr):
if 'cal' in fn.lower():
continue
full_path_fn = os.path.normpath(os.path.join(dr, fn))
if fn[-4:] == '.cfg':
shutil.copy(full_path_fn, os.path.join(save_path, fn))
# test for copy date
if copy_date is not None:
file_date = datetime.datetime.fromtimestamp(
os.path.getmtime(full_path_fn))
if copy_type == 'after':
if file_date < c_date:
continue
elif copy_type == 'before':
if file_date > c_date:
continue
elif copy_type == 'on':
if file_date.date() != c_date.date():
continue
try:
file_size = os.stat(full_path_fn)[6]
if file_size >= 1600 and fn.find('.cfg') == -1:
zt = Zen3D(fn=full_path_fn)
zt.read_all_info()
if zt.metadata.station.find(s_int) >= 0:
channel = zt.metadata.ch_cmp.upper()
st = zt.schedule.Time.replace(':', '')
sd = zt.schedule.Date.replace('-', '')
sv_fn = '{0}_{1}_{2}_{3}_{4}.Z3D'.format(station,
sd,
st,
int(zt.df),
channel)
full_path_sv = os.path.join(save_path, sv_fn)
fn_list.append(full_path_sv)
shutil.copy(full_path_fn, full_path_sv)
print('copied {0} to {1}\n'.format(full_path_fn,
full_path_sv))
#log_fid.writelines(zt.log_lines)
log_fid.write('copied {0} to \n'.format(full_path_fn)+\
' {0}\n'.format(full_path_sv))
else:
log_fid.write('+++ Skipped {0} because file too small {1}'.format(full_path_fn,
file_size))
except WindowsError:
print('Faulty file at {0}'.format(full_path_fn))
log_fid.write('---Faulty file at {0}\n\n'.format(full_path_fn))
log_fid.close()
et_test = time.ctime()
print('Started copying at: {0}'.format(st_test))
print('Ended copying at: {0}'.format(et_test))
return fn_list
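# Illustrative sketch (assumption): copy_from_sd can be restricted to a single
# acquisition day by combining copy_date and copy_type; the save_path below is
# hypothetical:
#   >>> import mtpy.usgs.zen as zen
#   >>> fn_list = zen.copy_from_sd('mt01', save_path=r"d:\MTData",
#   ...                            copy_date='2014-07-15', copy_type='on')
# copy_date is parsed with dateutil, so any unambiguous date string works, but
# the YYYY-MM-DD form shown in the docstring is the safest choice.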
#==============================================================================
# delete files from sd cards
#==============================================================================
def delete_files_from_sd(delete_date=None, delete_type=None,
delete_folder=r"d:\Peacock\MTData\Deleted",
verbose=True):
"""
delete files from sd card, if delete_date is not None, anything on this
date and before will be deleted. Deletes just .Z3D files, leaves
zenini.cfg
Arguments:
-----------
**delete_date** : YYYY-MM-DD
date to delete files from
**delete_type** : [ 'all' | 'before' | 'after' | 'on' ]
* 'all' --> delete all files on sd card
* 'before' --> delete files on and before delete_date
* 'after' --> delete files on and after delete_date
* 'on' --> delete files on delete_date
**delete_folder** : string
full path to a folder where files will be moved to
just in case. If None, files will be deleted
forever.
Returns:
---------
**delete_fn_list** : list
list of deleted files.
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> # Delete all files before given date, forever.
>>> zen.delete_files_from_sd(delete_date='2004-04-20',
delete_type='before',
delete_folder=None)
>>> # Delete all files into a folder just in case
>>> zen.delete_files_from_sd(delete_type='all',
delete_folder=r"/home/mt/deleted_files")
"""
drive_names = get_drive_names()
if drive_names is None:
raise IOError('No drives to copy from.')
log_lines = []
if delete_folder is not None:
if not os.path.exists(delete_folder):
os.mkdir(delete_folder)
log_fid = open(os.path.join(delete_folder, 'Log_file.log'), 'w')
if delete_date is not None:
delete_date = int(delete_date.replace('-', ''))
delete_fn_list = []
for key, value in drive_names.items():
dr = r"{0}:\\".format(key)
log_lines.append('='*25+value+'='*25+'\n')
for fn in os.listdir(dr):
if fn[-4:].lower() == '.z3d':
full_path_fn = os.path.normpath(os.path.join(dr, fn))
zt = Zen3D(full_path_fn)
zt.read_all_info()
zt_date = int(zt.schedule.Date.replace('-', ''))
#zt.get_info()
if delete_type == 'all' or delete_date is None:
if delete_folder is None:
os.remove(full_path_fn)
delete_fn_list.append(full_path_fn)
log_lines.append('Deleted {0}'.format(full_path_fn))
else:
shutil.move(full_path_fn,
os.path.join(delete_folder,
os.path.basename(full_path_fn)))
delete_fn_list.append(full_path_fn)
log_lines.append('Moved {0} '.format(full_path_fn)+
'to {0}'.format(delete_folder))
else:
#zt_date = int(zt.schedule_date.replace('-',''))
if delete_type == 'before':
if zt_date <= delete_date:
if delete_folder is None:
os.remove(full_path_fn)
delete_fn_list.append(full_path_fn)
log_lines.append('Deleted {0}\n'.format(full_path_fn))
else:
shutil.move(full_path_fn,
os.path.join(delete_folder,
os.path.basename(full_path_fn)))
delete_fn_list.append(full_path_fn)
log_lines.append('Moved {0} '.format(full_path_fn)+
'to {0}\n'.format(delete_folder))
elif delete_type == 'after':
if zt_date >= delete_date:
if delete_folder is None:
os.remove(full_path_fn)
delete_fn_list.append(full_path_fn)
log_lines.append('Deleted {0}\n'.format(full_path_fn))
else:
shutil.move(full_path_fn,
os.path.join(delete_folder,
os.path.basename(full_path_fn)))
delete_fn_list.append(full_path_fn)
log_lines.append('Moved {0} '.format(full_path_fn)+
'to {0}\n'.format(delete_folder))
elif delete_type == 'on':
if zt_date == delete_date:
if delete_folder is None:
os.remove(full_path_fn)
delete_fn_list.append(full_path_fn)
log_lines.append('Deleted {0}\n'.format(full_path_fn))
else:
shutil.move(full_path_fn,
os.path.join(delete_folder,
os.path.basename(full_path_fn)))
delete_fn_list.append(full_path_fn)
log_lines.append('Moved {0} '.format(full_path_fn)+
'to {0}\n'.format(delete_folder))
if delete_folder is not None:
log_fid = open(os.path.join(delete_folder, 'Delete_log.log'), 'w')
log_fid.writelines(log_lines)
log_fid.close()
if verbose:
for lline in log_lines:
print(lline)
return delete_fn_list
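# Illustrative sketch (assumption): moving, rather than permanently deleting,
# everything recorded on or before a given date (delete_folder is hypothetical):
#   >>> import mtpy.usgs.zen as zen
#   >>> deleted = zen.delete_files_from_sd(delete_date='2014-07-15',
#   ...                                    delete_type='before',
#   ...                                    delete_folder=r"d:\MTData\Deleted")
# The date is compared internally as an integer (YYYYMMDD), so the
# dash-separated format is required here.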
#==============================================================================
# Make mtpy_mt files
#==============================================================================
def make_mtpy_mt_files(fn_list, station_name='mb', fmt='%.8e',
notch_dict=None):
"""
makes mtpy_mt files from .Z3D files
Arguments:
-----------
**fn_list** : list of full paths to .Z3D files
**station_name** : prefix for station names
**fmt** : format of data numbers for mt_files
Outputs:
--------
**fn_arr** : np.ndarray with fields (station, len, df, start_dt, comp, fn)
:Example: ::
>>> import mtpy.usgs.zen as zen
>>> fn_list = zen.copy_from_sd('mt01')
>>> mtpy_fn = zen.make_mtpy_mt_files(fn_list, station_name='mt')
"""
fn_arr = np.zeros(len(fn_list),
dtype=[('station', '|S6'), ('len', int), ('df', int),
('start_dt', '|S22'), ('comp', '|S2'),
('fn', '|S100')])
fn_lines = []
for ii, fn in enumerate(fn_list):
zd = Zen3D(fn)
#read in Z3D data
try:
zd.read_3d()
except ZenGPSError:
try:
zd._seconds_diff = 59
zd.read_3d()
except ZenGPSError:
pass
else:
#write mtpy mt file
zd.write_ascii_mt_file(save_station=station_name,
fmt=fmt,
notch_dict=notch_dict)
#create lines to write to a log file
fn_arr[ii]['station'] = '{0}{1}'.format(station_name, zd.rx_stn)
fn_arr[ii]['len'] = zd.time_series.shape[0]
fn_arr[ii]['df'] = zd.df
fn_arr[ii]['start_dt'] = zd.start_dt
fn_arr[ii]['comp'] = zd.ch_cmp
fn_arr[ii]['fn'] = zd.fn
fn_lines.append(''.join(['--> station: {0}{1}\n'.format(station_name,
zd.rx_stn),
' ts_len = {0}\n'.format(zd.time_series.shape[0]),
' df = {0}\n'.format(zd.df),
' start_dt = {0}\n'.format(zd.start_dt),
' comp = {0}\n'.format(zd.ch_cmp),
' fn = {0}\n'.format(zd.fn)]))
return fn_arr, fn_lines
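# Illustrative sketch (assumption): the structured array and log lines returned
# above can be written to a simple summary file (the file name is hypothetical):
#   >>> fn_arr, fn_lines = make_mtpy_mt_files(fn_list, station_name='mt')
#   >>> with open('mtpy_mt_files.log', 'w') as fid:
#   ...     fid.writelines(fn_lines)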
|
MTgeophysics/mtpy
|
mtpy/usgs/zen.py
|
Python
|
gpl-3.0
| 107,020
|
[
"Gaussian"
] |
257050105ca2534830c24bf03dca781dcb247aac5c4982440736c1fab0d75825
|
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import json
import logging
import os, shutil, zipfile
import subprocess
from urllib.parse import urlparse
#
# DatabaseInteractor
#
class DatabaseInteractor(slicer.ScriptedLoadableModule.ScriptedLoadableModule):
def __init__(self, parent):
slicer.ScriptedLoadableModule.ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Database Interactor"
self.parent.categories = ["Web System Tools"]
self.parent.dependencies = []
self.parent.contributors = [
"Clement Mirabel (University of Michigan)", "Juan Carlos Prieto (UNC)"]
self.parent.helpText = """
For users using the amazon web service Website developed by Clement Mirabel and Juan Prieto for Lucia Cevidanes project, the server address should be 'https://ec2-52-42-49-63.us-west-2.compute.amazonaws.com:8180/'. If you have any issue connecting, contact juanprieto@gmail.com or clement.mirabel@gmail.com.
"""
self.parent.acknowledgementText = """
To be completed.
"""
#
# DatabaseInteractorWidget
#
class DatabaseInteractorWidget(slicer.ScriptedLoadableModule.ScriptedLoadableModuleWidget):
def setup(self):
"""
Function used to setup the UI, global variables and libraries used in this extension
"""
slicer.ScriptedLoadableModule.ScriptedLoadableModuleWidget.setup(self)
# ---------------------------------------------------------------- #
# ------------------------ Global variables ---------------------- #
# ---------------------------------------------------------------- #
import DatabaseInteractorLib
self.DatabaseInteractorLib = DatabaseInteractorLib.DatabaseInteractorLib()
import ClusterpostLib
self.ClusterpostLib = ClusterpostLib.ClusterpostLib()
self.logic = DatabaseInteractorLogic()
self.connected = False
self.collections = dict()
self.morphologicalData = dict()
self.attachmentsList = {}
self.tokenFilePath = os.path.join(slicer.app.temporaryPath, 'user.slicer_token')
self.serverFilePath = os.path.join(slicer.app.temporaryPath, 'user.slicer_server')
self.moduleName = 'DatabaseInteractor'
scriptedModulesPath = getattr(slicer.modules, self.moduleName.lower()).path
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
self.modulesLoaded = dict()
# Timer definition
self.timer = qt.QTimer()
self.timer.timeout.connect(self.overflow)
self.timerPeriod = 60 * 60 * 1000
# ---------------------------------------------------------------- #
# ---------------- Definition of the UI interface ---------------- #
# ---------------------------------------------------------------- #
# --------------------------------------------------- #
# --- Definition of connection collapsible button --- #
# --------------------------------------------------- #
# Collapsible button
self.connectionCollapsibleButton = ctk.ctkCollapsibleButton()
self.connectionCollapsibleButton.text = "Authentication"
self.layout.addWidget(self.connectionCollapsibleButton)
self.connectionVBoxLayout = qt.QVBoxLayout(self.connectionCollapsibleButton)
# - GroupBox containing Server, Email and Password Inputs - #
self.connectionGroupBox = qt.QGroupBox("Login")
self.connectionGroupBoxLayout = qt.QFormLayout(self.connectionGroupBox)
self.connectionVBoxLayout.addWidget(self.connectionGroupBox)
# Server input
self.serverInput = qt.QLineEdit()
self.serverInput.text = ''
self.connectionGroupBoxLayout.addRow("Server address: ", self.serverInput)
# Email input
self.emailInput = qt.QLineEdit()
self.emailInput.text = ''
self.connectionGroupBoxLayout.addRow("Email address: ", self.emailInput)
# Password input
self.passwordInput = qt.QLineEdit()
self.passwordInput.text = ''
self.passwordInput.setEchoMode(qt.QLineEdit.Password)
self.connectionGroupBoxLayout.addRow("Password: ", self.passwordInput)
# Error Login Label
self.errorLoginText = qt.QLabel()
self.errorLoginText.setStyleSheet("color: rgb(255, 0, 0);")
self.connectionGroupBoxLayout.addWidget(self.errorLoginText)
self.errorLoginText.hide()
# Connection Button
self.connectionButton = qt.QPushButton("Connect")
self.connectionButton.toolTip = "Connect to the server."
self.connectionButton.enabled = False
self.connectionVBoxLayout.addWidget(self.connectionButton)
# Disconnection Button
self.disconnectionButton = qt.QPushButton("Disconnect")
self.disconnectionButton.toolTip = "Disconnect from the server."
self.connectionVBoxLayout.addWidget(self.disconnectionButton)
self.disconnectionButton.hide()
# ------------------------------------------------- #
# --- Definition of download collapsible button --- #
# ------------------------------------------------- #
# Collapsible button
self.downloadCollapsibleButton = ctk.ctkCollapsibleButton()
self.downloadCollapsibleButton.text = "Download data"
self.layout.addWidget(self.downloadCollapsibleButton, 0)
self.downloadFormLayout = qt.QFormLayout(self.downloadCollapsibleButton)
self.downloadCollapsibleButton.hide()
# Query type selector groupBox
self.queryTypeGroupBox = qt.QGroupBox("Find data with:")
self.queryTypeGroupBoxLayout = qt.QHBoxLayout(self.queryTypeGroupBox)
# RadioButtons choices
self.downloadRadioButtonPatientOnly = qt.QRadioButton("PatientId only")
self.downloadRadioButtonPatientOnly.setChecked(True)
self.downloadRadioButtonPatientDate = qt.QRadioButton("PatientId and date")
self.queryTypeGroupBoxLayout.addWidget(self.downloadRadioButtonPatientOnly)
self.queryTypeGroupBoxLayout.addWidget(self.downloadRadioButtonPatientDate)
self.downloadFormLayout.addRow(self.queryTypeGroupBox)
# Directory Button
self.downloadFilepathSelector = ctk.ctkDirectoryButton()
self.downloadFilepathSelector.toolTip = "Choose a path to save the model."
self.downloadFormLayout.addRow(qt.QLabel("Choose a destination: "), self.downloadFilepathSelector)
# Collection Selector
self.downloadCollectionSelector = qt.QComboBox()
self.downloadCollectionSelector.addItem("None")
self.downloadFormLayout.addRow("Choose a collection: ", self.downloadCollectionSelector)
# Download entire collection Button
self.downloadCollectionButton = qt.QPushButton("Download entire collection")
self.downloadCollectionButton.toolTip = "Download the whole collection in a folder."
self.downloadFormLayout.addWidget(self.downloadCollectionButton)
self.downloadCollectionButton.enabled = False
# Patient Selector
self.downloadPatientSelector = qt.QComboBox()
self.downloadPatientSelector.addItem("None")
self.downloadFormLayout.addRow("Choose a patient: ", self.downloadPatientSelector)
self.downloadPatientSelector.setDuplicatesEnabled(True)
# Date Selector
self.downloadDate = qt.QCalendarWidget()
self.downloadDate.setStyleSheet(
"QCalendarWidget QWidget#qt_calendar_navigationbar{background-color:rgb(200,200,200);}"
"QCalendarWidget QWidget#qt_calendar_nextmonth{"
"qproperty-icon: url(" + os.path.join(scriptedModulesPath, "Resources", "Icons", "ArrowRight.png") + ");"
"qproperty-iconSize: 10px;width:20px;}"
"QCalendarWidget QWidget#qt_calendar_prevmonth{"
"qproperty-icon: url(" + os.path.join(scriptedModulesPath, "Resources", "Icons", "ArrowLeft.png") + ");"
"qproperty-iconSize: 10px;width:20px;}"
"QCalendarWidget QToolButton{height:25px;width:90px;color:black;icon-size:25px,25px;"
"background-color:rgb(200,200,200);}"
"QCalendarWidget QMenu{width:125px;background-color:rgb(200,200,200);}"
"QCalendarWidget QSpinBox{width:65px;background-color:rgb(200,200,200);"
"selection-background-color:rgb(200,200,200);selection-color:black;}"
"QCalendarWidget QWidget{alternate-background-color:rgb(225,225,225);}"
"QCalendarWidget QAbstractItemView:enabled{color:rgb(100,100,100);"
"selection-background-color:rgb(200,200,200);selection-color:white;}"
"QCalendarWidget QAbstractItemView:disabled {color: rgb(200, 200, 200);}")
self.downloadDateLabel = qt.QLabel("Choose a date: ")
self.downloadFormLayout.addRow(self.downloadDateLabel, self.downloadDate)
self.downloadDateLabel.hide()
self.downloadDate.hide()
# Clickable dates formats
self.brushBlue = qt.QBrush()
self.brushBlue.setColor(qt.QColor(107,171,200))
self.checkableDateFormat = qt.QTextCharFormat()
self.checkableDateFormat.setBackground(self.brushBlue)
self.checkableDateFormat.setFontWeight(qt.QFont.Bold)
self.normalBrush = qt.QBrush()
self.normalBrush.setColor(qt.QColor(255, 255, 255))
self.normalDateFormat = qt.QTextCharFormat()
self.normalDateFormat.setBackground(self.normalBrush)
self.normalDateFormat.setFontWeight(qt.QFont.Normal)
# Attachment selector
self.downloadAttachmentSelector = qt.QComboBox()
self.downloadAttachmentSelector.addItem("None")
self.downloadFormLayout.addRow("Choose an attachment: ", self.downloadAttachmentSelector)
# Error download Label
self.downloadErrorText = qt.QLabel("No file found for this patientId !")
self.downloadErrorText.setStyleSheet("color: rgb(255, 0, 0);")
self.downloadFormLayout.addWidget(self.downloadErrorText)
self.downloadErrorText.hide()
# Download Button
self.downloadButton = qt.QPushButton("Download selected attachment")
self.downloadButton.toolTip = "Download patient data."
self.downloadFormLayout.addRow(self.downloadButton)
self.downloadButton.enabled = False
# ----------------------------------------------- #
# --- Definition of upload collapsible button --- #
# ----------------------------------------------- #
# Collapsible button
self.uploadCollapsibleButton = ctk.ctkCollapsibleButton()
self.uploadCollapsibleButton.text = "Upload data"
self.layout.addWidget(self.uploadCollapsibleButton, 0)
self.uploadFormLayout = qt.QFormLayout(self.uploadCollapsibleButton)
self.uploadCollapsibleButton.hide()
# Directory Button
self.uploadFilepathSelector = ctk.ctkDirectoryButton()
self.uploadFilepathSelector.toolTip = "Choose the path to the folder where you saved patient files."
self.uploadFormLayout.addRow(qt.QLabel("Choose collection folder: "), self.uploadFilepathSelector)
# Patient Selector
self.uploadPatientSelector = qt.QComboBox()
self.uploadPatientSelector.addItem("None")
self.uploadFormLayout.addRow("Choose a patient: ", self.uploadPatientSelector)
# Date Selector
self.uploadDateSelector = qt.QComboBox()
self.uploadDateSelector.addItem("None")
self.uploadFormLayout.addRow("Choose a date: ", self.uploadDateSelector)
# Layout of the differences between local folder and online database
self.uploadListLayout = qt.QVBoxLayout()
self.uploadFormLayout.addRow("Files to upload: ", self.uploadListLayout)
self.noneLabel = qt.QLabel("None")
self.uploadListLayout.addWidget(self.noneLabel)
# Volume selector
# self.modelSelector = slicer.qMRMLNodeComboBox()
# self.modelSelector.nodeTypes = (("vtkMRMLModelNode"), "")
# self.modelSelector.addEnabled = False
# self.modelSelector.removeEnabled = False
# self.modelSelector.setMRMLScene(slicer.mrmlScene)
# self.uploadFormLayout.addRow("Choose a model: ", self.modelSelector)
# Upload Button
self.uploadButton = qt.QPushButton("Upload")
self.uploadButton.toolTip = "Upload patient data."
self.uploadFormLayout.addRow(self.uploadButton)
self.uploadButton.enabled = False
self.uploadLabel = qt.QLabel("Files successfully uploaded !")
self.uploadLabel.setStyleSheet("color: rgb(0, 150, 0);")
self.uploadFormLayout.addRow(self.uploadLabel)
self.uploadLabel.hide()
# ----------------------------------------------- #
# - Definition of management collapsible button - #
# ----------------------------------------------- #
# Collapsible button
self.managementCollapsibleButton = ctk.ctkCollapsibleButton()
self.managementCollapsibleButton.text = "Management"
self.layout.addWidget(self.managementCollapsibleButton, 0)
self.managementFormLayout = qt.QFormLayout(self.managementCollapsibleButton)
self.managementCollapsibleButton.hide()
# Creation type selector groupBox
self.creationTypeGroupBox = qt.QGroupBox("Action wanted:")
self.creationTypeGroupBoxLayout = qt.QHBoxLayout(self.creationTypeGroupBox)
# RadioButtons choice
self.managementRadioButtonPatient = qt.QRadioButton("Create PatientId")
self.managementRadioButtonPatient.setChecked(True)
self.managementRadioButtonDate = qt.QRadioButton("Add a new date")
self.creationTypeGroupBoxLayout.addWidget(self.managementRadioButtonPatient)
self.creationTypeGroupBoxLayout.addWidget(self.managementRadioButtonDate)
self.managementFormLayout.addRow(self.creationTypeGroupBox)
# Directory Button
self.managementFilepathSelector = ctk.ctkDirectoryButton()
self.managementFilepathSelector.toolTip = "Choose the path to the folder where you saved patient files."
self.managementFormLayout.addRow(qt.QLabel("Choose collection folder: "), self.managementFilepathSelector)
# Patient name input
self.newPatientIdInput = qt.QLineEdit()
self.newPatientIdInput.text = ''
self.newPatientIdInputLabel = qt.QLabel("Enter PatiendId: ")
self.managementFormLayout.addRow(self.newPatientIdInputLabel, self.newPatientIdInput)
# Patient Selector
self.managementPatientSelector = qt.QComboBox()
self.managementPatientSelector.addItem("None")
self.managementPatientSelectorLabel = qt.QLabel("Choose a patient: ")
self.managementFormLayout.addRow(self.managementPatientSelectorLabel, self.managementPatientSelector)
self.managementPatientSelector.hide()
self.managementPatientSelectorLabel.hide()
# Date Selector
self.createDate = qt.QCalendarWidget()
self.createDate.setStyleSheet(
"QCalendarWidget QWidget#qt_calendar_navigationbar{background-color:rgb(200,200,200);}"
"QCalendarWidget QWidget#qt_calendar_nextmonth{"
"qproperty-icon: url(" + os.path.join(scriptedModulesPath, "Resources", "Icons", "ArrowRight.png") + ");"
"qproperty-iconSize: 10px;width:20px;}"
"QCalendarWidget QWidget#qt_calendar_prevmonth{"
"qproperty-icon: url(" + os.path.join(scriptedModulesPath, "Resources", "Icons", "ArrowLeft.png") + ");"
"qproperty-iconSize: 10px;width:20px;}"
"QCalendarWidget QToolButton{height:25px;width:90px;color:black;icon-size:25px,25px;"
"background-color:rgb(200,200,200);}"
"QCalendarWidget QMenu{width:125px;background-color:rgb(200,200,200);}"
"QCalendarWidget QSpinBox{width:65px;background-color:rgb(200,200,200);"
"selection-background-color:rgb(200,200,200);selection-color:black;}"
"QCalendarWidget QWidget{alternate-background-color:rgb(225,225,225);}"
"QCalendarWidget QAbstractItemView:enabled{color:rgb(100,100,100);"
"selection-background-color:rgb(200,200,200);selection-color:white;}"
"QCalendarWidget QAbstractItemView:disabled {color: rgb(200, 200, 200);}")
self.createDateLabel = qt.QLabel("Choose a date: ")
self.managementFormLayout.addRow(self.createDateLabel, self.createDate)
# Patient & Date Creator Button
self.createButton = qt.QPushButton("Create patient Id")
self.createButton.enabled = False
self.managementFormLayout.addRow(self.createButton)
# -------------------------------------------------------------- #
# -------- Definition of task creator collapsible button ------- #
# -------------------------------------------------------------- #
# Collapsible button
self.taskCreatorCollapsibleButton = ctk.ctkCollapsibleButton()
self.taskCreatorCollapsibleButton.text = "Create a task"
self.layout.addWidget(self.taskCreatorCollapsibleButton)
self.taskCreatorFormLayout = qt.QFormLayout(self.taskCreatorCollapsibleButton)
self.executableSelector = qt.QComboBox()
self.taskCreatorFormLayout.addRow("Select an executable:", self.executableSelector)
self.widgetSelectedGroupBox = qt.QGroupBox()
self.widgetSelectedGroupBoxLayout = qt.QVBoxLayout(self.widgetSelectedGroupBox)
self.taskCreatorFormLayout.addRow(self.widgetSelectedGroupBox)
self.widgetSelectedGroupBox.hide()
self.currentExecutable = None
# self.condyleClassificationCollapsibleButton.hide()
self.taskCreatorCollapsibleButton.hide()
# -------------------------------------------------------------- #
# ------------- Job computing collapsible button --------------- #
# -------------------------------------------------------------- #
# Collapsible button
self.jobComputerCollapsibleButton = ctk.ctkCollapsibleButton()
self.jobComputerCollapsibleButton.text = "Auto compute tasks"
self.layout.addWidget(self.jobComputerCollapsibleButton)
self.jobComputerCollapsibleFormLayout = qt.QFormLayout(self.jobComputerCollapsibleButton)
self.jobComputerHostInput = qt.QLineEdit()
self.jobComputerHostInput.setReadOnly(True)
self.jobComputerPortInput = qt.QLineEdit()
self.jobComputerPortInput.setReadOnly(True)
self.timePeriodSelector = qt.QDoubleSpinBox()
self.timePeriodSelector.setSuffix(' min')
self.timePeriodSelector.setRange(1,1440)
self.timePeriodSelector.setDecimals(0)
self.jobComputerParametersLayout = qt.QHBoxLayout()
self.jobComputerParametersLayout.addWidget(qt.QLabel("Host: "))
self.jobComputerParametersLayout.addWidget(self.jobComputerHostInput)
self.jobComputerParametersLayout.addWidget(qt.QLabel("Port: "))
self.jobComputerParametersLayout.addWidget(self.jobComputerPortInput)
self.jobComputerParametersLayout.addWidget(qt.QLabel("Period: "))
self.jobComputerParametersLayout.addWidget(self.timePeriodSelector)
self.jobComputerCollapsibleFormLayout.addRow(self.jobComputerParametersLayout)
# Connect Socket Button
self.connectListenerButton = qt.QPushButton("Connect")
# Disconnect Socket Button
self.disconnectListenerButton = qt.QPushButton("Disconnect")
self.disconnectListenerButton.enabled = False
# HBox Layout for (dis)connection buttons
self.connectionHBoxLayout = qt.QHBoxLayout()
self.connectionHBoxLayout.addWidget(self.connectListenerButton)
self.connectionHBoxLayout.addWidget(self.disconnectListenerButton)
self.jobComputerCollapsibleFormLayout.addRow(self.connectionHBoxLayout)
# Websocket Console
self.displayConsole = qt.QTextEdit()
self.displayConsole.readOnly = True
self.displayConsole.setStyleSheet(
"color: white;"
"background-color: black;"
"font-family: Courier;font-style: normal;font-size: 12pt;"
)
self.jobComputerCollapsibleFormLayout.addRow(self.displayConsole)
self.jobComputerCollapsibleButton.hide()
# Add vertical spacer
self.layout.addStretch(1)
# --------------------------------------------------------- #
# ----------------------- Signals ------------------------- #
# --------------------------------------------------------- #
# Buttons
self.connectionButton.connect('clicked(bool)', self.onConnectionButton)
self.disconnectionButton.connect('clicked(bool)', self.onDisconnectionButton)
self.downloadButton.connect('clicked(bool)', self.onDownloadButton)
self.downloadCollectionButton.connect('clicked(bool)', self.onDownloadCollectionButton)
self.uploadButton.connect('clicked(bool)', self.onUploadButton)
self.createButton.connect('clicked(bool)', self.onCreateButton)
self.connectListenerButton.connect('clicked(bool)', self.onConnectListenerButton)
self.disconnectListenerButton.connect('clicked(bool)', self.onDisconnectListenerButton)
# Radio Buttons
self.downloadRadioButtonPatientOnly.toggled.connect(self.onRadioButtontoggled)
self.downloadRadioButtonPatientDate.toggled.connect(self.onRadioButtontoggled)
self.managementRadioButtonDate.toggled.connect(self.onManagementRadioButtontoggled)
self.managementRadioButtonPatient.toggled.connect(self.onManagementRadioButtontoggled)
# Inputs
self.emailInput.textChanged.connect(self.onInputChanged)
self.passwordInput.textChanged.connect(self.onInputChanged)
self.newPatientIdInput.textChanged.connect(self.isPossibleCreatePatient)
# ComboBoxes
self.downloadCollectionSelector.connect('currentIndexChanged(const QString&)', self.fillSelectorWithPatients)
self.downloadPatientSelector.connect('currentIndexChanged(const QString&)', self.onDownloadPatientChosen)
self.uploadPatientSelector.connect('currentIndexChanged(const QString&)', self.onUploadPatientChosen)
self.uploadDateSelector.connect('currentIndexChanged(const QString&)', self.onUploadDateChosen)
self.managementPatientSelector.connect('currentIndexChanged(const QString&)', self.isPossibleAddDate)
self.executableSelector.connect('currentIndexChanged(const QString&)', self.executableSelected)
# Calendar
self.downloadDate.connect('clicked(const QDate&)', self.fillSelectorWithAttachments)
# FilePath selectors
self.uploadFilepathSelector.connect('directorySelected(const QString &)', self.createFilesDictionary)
self.managementFilepathSelector.connect('directorySelected(const QString &)',
self.onManagementDirectorySelected)
# --- Try to connect when launching the module --- #
if os.path.exists(self.tokenFilePath):
file = open(self.tokenFilePath, 'r')
first_line = file.readline()
self.DatabaseInteractorLib.getServer(self.serverFilePath)
if first_line != "":
# self.token = first_line
self.ClusterpostLib.setToken(first_line)
self.ClusterpostLib.setServerUrl(self.DatabaseInteractorLib.server[:-1])
self.DatabaseInteractorLib.token = first_line
self.connected = True
self.connectionGroupBox.hide()
self.connectionButton.hide()
self.disconnectionButton.show()
self.fillSelectorWithCollections()
self.downloadCollapsibleButton.show()
self.uploadCollapsibleButton.show()
self.taskCreatorCollapsibleButton.show()
self.jobComputerCollapsibleButton.show()
self.fillExecutableSelector()
server = self.DatabaseInteractorLib.server
self.jobComputerHostInput.text = urlparse(server).hostname
self.jobComputerPortInput.text = urlparse(server).port
if "admin" in self.DatabaseInteractorLib.getUserScope():
self.managementCollapsibleButton.show()
file.close()
def exit(self):
"""
Function used to reset the CLI widget when exiting the module
"""
print("-----------EXIT------------")
for moduleName, slot in self.modulesLoaded.items():
node = getattr(slicer.modules, moduleName)
applyButton = node.widgetRepresentation().ApplyPushButton
applyButton.disconnect(applyButton, 'clicked(bool)')
applyButton.connect('clicked(bool)', slot)
self.modulesLoaded = dict()
# ------------ Buttons -------------- #
def onConnectionButton(self):
"""
Function used to connect user to the database and store token in a file
"""
self.DatabaseInteractorLib.setServer(self.serverInput.text, self.serverFilePath)
self.ClusterpostLib.setServerUrl(self.serverInput.text[:-1])
token, error = self.DatabaseInteractorLib.connect(self.emailInput.text, self.passwordInput.text)
print(error)
if token != -1:
userScope = self.DatabaseInteractorLib.getUserScope()
if len(userScope) != 1 or "default" not in userScope:
# Write the token in a temporary file
file = open(self.tokenFilePath, 'w+')
file.write(token)
file.close()
self.ClusterpostLib.setToken(token)
self.connected = True
self.connectionGroupBox.hide()
self.connectionButton.hide()
self.disconnectionButton.show()
self.fillSelectorWithCollections()
self.downloadCollapsibleButton.show()
self.uploadCollapsibleButton.show()
self.managementCollapsibleButton.show()
self.taskCreatorCollapsibleButton.show()
self.jobComputerCollapsibleButton.show()
self.fillExecutableSelector()
self.jobComputerHostInput.text = urlparse(self.serverInput.text).hostname
self.jobComputerPortInput.text = urlparse(self.serverInput.text).port
if "admin" not in userScope:
self.managementCollapsibleButton.hide()
elif token == -1:
self.errorLoginText.text = error
self.errorLoginText.show()
else:
self.errorLoginText.text = "Insufficient scope ! Email luciacev@umich.edu for access."
self.errorLoginText.show()
def onDisconnectionButton(self):
"""
Function used to disconnect user to the database
"""
self.serverInput.text = self.DatabaseInteractorLib.server
self.emailInput.text = self.DatabaseInteractorLib.getUserEmail()
self.DatabaseInteractorLib.disconnect()
self.connected = False
self.passwordInput.text = ''
self.connectionGroupBox.show()
self.connectionButton.show()
self.errorLoginText.hide()
self.disconnectionButton.hide()
self.downloadCollapsibleButton.hide()
self.uploadCollapsibleButton.hide()
self.managementCollapsibleButton.hide()
self.taskCreatorCollapsibleButton.hide()
self.jobComputerCollapsibleButton.hide()
# Erase token from file
with open(self.tokenFilePath, "w"):
pass
def onDownloadButton(self):
"""
Function used to download data with information provided
"""
for items in self.morphologicalData:
if "_attachments" in items:
for attachments in items["_attachments"].keys():
if attachments == self.downloadAttachmentSelector.currentText:
documentId = items["_id"]
data = self.DatabaseInteractorLib.getAttachment(documentId, self.downloadAttachmentSelector.currentText, None)
# Write the attachment in a file
filePath = os.path.join(self.downloadFilepathSelector.directory, self.downloadAttachmentSelector.currentText)
if data != -1:
with open(filePath, 'wb+') as file:
for chunk in data.iter_content(2048):
file.write(chunk)
file.close()
# Load the file
self.logic.fileLoader(filePath)
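# Illustrative sketch (assumption, not part of the module): the download above
# streams the attachment in 2 kB chunks.  The same pattern with the plain
# requests library would look like (url and file_path are hypothetical):
#   resp = requests.get(url, stream=True)
#   with open(file_path, 'wb') as fid:
#       for chunk in resp.iter_content(2048):
#           fid.write(chunk)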
def onDownloadCollectionButton(self):
"""
Function used to download an entire collection and organise it with folders
"""
collectionPath = os.path.join(self.downloadFilepathSelector.directory,
self.downloadCollectionSelector.currentText)
# Check if folder already exists
if not os.path.exists(collectionPath):
os.makedirs(collectionPath)
index = 0
# Write collection document
for items in self.DatabaseInteractorLib.getMorphologicalDataCollections().json():
if items["name"] == self.downloadCollectionSelector.currentText:
collectionId = items["_id"]
descriptor = {
"_id": collectionId,
"type": "morphologicalDataCollection",
"items": {}
}
# Create a folder for each patient
while index < self.downloadPatientSelector.count:
if self.downloadPatientSelector.itemText(index) != "None":
descriptor["items"][self.downloadPatientSelector.itemText(index)] = {}
if not os.path.exists(os.path.join(collectionPath, self.downloadPatientSelector.itemText(index))):
os.makedirs(os.path.join(collectionPath, self.downloadPatientSelector.itemText(index)))
index += 1
# Fill the folders with attachments
for items in self.morphologicalData:
if "_attachments" in items:
documentId = items["_id"]
patientId = items["patientId"]
if "date" in items:
date = items["date"]
else:
date = "NoDate"
descriptor["items"][patientId][date[:10]] = os.path.join(collectionPath, patientId, date[:10],
".DBIDescriptor")
if not os.path.exists(os.path.join(collectionPath, patientId, date[:10])):
os.makedirs(os.path.join(collectionPath, patientId, date[:10]))
for attachments in items["_attachments"].keys():
data = self.DatabaseInteractorLib.getAttachment(documentId, attachments, None).text
# Save the document
file = open(os.path.join(collectionPath, patientId, date[:10], '.DBIDescriptor'), 'w+')
json.dump(items, file, indent=3, sort_keys=True)
file.close()
# Save the attachment
filePath = os.path.join(collectionPath, patientId, date[:10], attachments)
with open(filePath, 'wb+') as file:
for chunk in data:
file.write(chunk)
file.close()
file = open(os.path.join(collectionPath, '.DBIDescriptor'), 'w+')
json.dump(descriptor, file, indent=3, sort_keys=True)
file.close()
self.uploadFilepathSelector.directory = collectionPath
self.managementFilepathSelector.directory = collectionPath
def onUploadButton(self):
"""
Function used to upload a data to the correct patient
"""
collection = self.uploadFilepathSelector.directory
self.checkBoxesChecked()
for patient in self.checkedList.keys():
for date in self.checkedList[patient].keys():
for attachment in self.checkedList[patient][date]["items"]:
with open(os.path.join(collection, patient, date, '.DBIDescriptor'), 'r') as file:
descriptor = json.load(file)
documentId = descriptor["_id"]
# Add new attachments to patient
path = os.path.join(collection,patient,date,attachment)
with open(path, 'r') as file:
self.DatabaseInteractorLib.addAttachment(documentId, attachment, file)
# Update descriptor
data = self.DatabaseInteractorLib.getMorphologicalDataByPatientId(patient).json()[0]
file = open(os.path.join(self.uploadFilepathSelector.directory, patient, date,
'.DBIDescriptor'), 'w+')
json.dump(data, file, indent=3, sort_keys=True)
file.close()
# Update morphologicalData list
for items in self.collections:
if items["name"] == self.downloadCollectionSelector.currentText:
self.morphologicalData = self.DatabaseInteractorLib.getMorphologicalData(items["_id"]).json()
self.uploadLabel.show()
def onCreateButton(self):
"""
Function used to create the architecture for a new patient or new date, updating descriptors
"""
collectionPath = self.managementFilepathSelector.directory
patientId = self.managementPatientSelector.currentText
date = str(self.createDate.selectedDate)
collection = os.path.split(self.managementFilepathSelector.directory)[1]
# Check if it is a new patient
if self.managementRadioButtonPatient.isChecked():
patientId = self.newPatientIdInput.text
# Add to database
owner = self.DatabaseInteractorLib.getUserEmail()
data = {
"patientId": patientId,
"date": date,
"owners": [{"user": owner}],
"type": "morphologicalData",
}
docId = self.DatabaseInteractorLib.createMorphologicalData(json.dumps(data)).json()["id"]
for items in self.collections:
if items["name"] == collection:
collectionJson = self.DatabaseInteractorLib.getMorphologicalDataCollection(items["_id"]).json()
collectionJson["items"].append({'_id': docId})
self.DatabaseInteractorLib.updateMorphologicalDataCollection(json.dumps(collectionJson))
# Create date folder
if not os.path.exists(os.path.join(collectionPath, patientId, date)):
os.makedirs(os.path.join(collectionPath, patientId, date))
# Write descriptor
for items in self.DatabaseInteractorLib.getMorphologicalData(collectionJson["_id"]).json():
if items["_id"] == docId:
file = open(os.path.join(collectionPath, patientId, date, '.DBIDescriptor'), 'w+')
json.dump(items, file, indent=3, sort_keys=True)
file.close()
# Update collection descriptor
file = open(os.path.join(collectionPath, '.DBIDescriptor'), 'r')
jsonfile = json.load(file)
file.close()
if patientId not in jsonfile["items"]:
jsonfile["items"][patientId] = {}
jsonfile["items"][patientId][date] = os.path.join(collectionPath, patientId, date, '.DBIDescriptor')
file = open(os.path.join(collectionPath, '.DBIDescriptor'), 'w+')
json.dump(jsonfile, file, indent=3, sort_keys=True)
file.close()
self.fillSelectorWithPatients()
def onConnectListenerButton(self):
"""
Function used to create a timer with the period selected in the UI
"""
self.timerPeriod = int(self.timePeriodSelector.value) * 60 * 1000 # Timer period is in ms
self.timer.start(self.timerPeriod)
self.connectListenerButton.enabled = False
self.disconnectListenerButton.enabled = True
def onDisconnectListenerButton(self):
"""
Function used to stop the timer and the job listening
"""
self.timer.stop()
self.connectListenerButton.enabled = True
self.disconnectListenerButton.enabled = False
# ---------- Radio Buttons ---------- #
def onRadioButtontoggled(self):
"""
Function used to display interface corresponding to the query checked
"""
self.downloadErrorText.hide()
self.fillSelectorWithAttachments()
if self.downloadRadioButtonPatientOnly.isChecked():
self.downloadErrorText.text = "No file found for this patientId !"
self.downloadDateLabel.hide()
self.downloadDate.hide()
else:
self.downloadErrorText.text = "No file found for this date !"
self.downloadDateLabel.show()
self.downloadDate.show()
def onManagementRadioButtontoggled(self):
"""
Function used to display interface corresponding to the management action checked
"""
if self.managementRadioButtonPatient.isChecked():
self.managementPatientSelector.hide()
self.managementPatientSelectorLabel.hide()
self.newPatientIdInput.show()
self.newPatientIdInputLabel.show()
self.createButton.setText("Create this patientId")
self.isPossibleCreatePatient()
else:
self.fillSelectorWithDescriptorPatients()
self.newPatientIdInput.hide()
self.newPatientIdInputLabel.hide()
self.managementPatientSelector.show()
self.managementPatientSelectorLabel.show()
self.createButton.setText("Add this date")
self.isPossibleAddDate()
# ------------- Inputs -------------- #
def onInputChanged(self):
"""
Function used to enable the connection button if userlogin and password are provided
"""
self.connectionButton.enabled = (len(self.emailInput.text) != 0 and len(self.passwordInput.text) != 0)
def isPossibleCreatePatient(self):
"""
Function used to enable the creation button if the path contains a descriptor and a name is given
"""
directoryPath = self.managementFilepathSelector.directory
self.createButton.enabled = False
# Check if the folder is a collection with a DBIDescriptor file
if self.newPatientIdInput.text != '' and os.path.exists(os.path.join(directoryPath, '.DBIDescriptor')):
self.createButton.enabled = True
# ----------- Combo Boxes ----------- #
def fillSelectorWithCollections(self):
"""
Function used to fill the comboBoxes with morphologicalCollections
"""
self.collections = self.DatabaseInteractorLib.getMorphologicalDataCollections().json()
self.downloadCollectionSelector.clear()
for items in self.collections:
self.downloadCollectionSelector.addItem(items["name"])
if self.downloadCollectionSelector.count == 0:
self.downloadCollectionSelector.addItem("None")
def fillSelectorWithPatients(self):
"""
Function used to fill the comboBox with patientId corresponding to the collection selected
"""
for items in self.morphologicalData:
if "date" in items:
date = items["date"]
self.downloadDate.setDateTextFormat(qt.QDate(int(date[:4]), int(date[5:7]), int(date[8:10])),
self.normalDateFormat)
else:
date = "NoDate"
text = self.downloadCollectionSelector.currentText
self.downloadButton.enabled = text
if text != "None":
self.downloadCollectionButton.enabled = True
# Get the patientIds in the selected collection
for items in self.collections:
if items["name"] == text:
self.morphologicalData = self.DatabaseInteractorLib.getMorphologicalData(items["_id"]).json()
self.downloadPatientSelector.clear()
for items in self.morphologicalData:
if self.downloadPatientSelector.findText(items["patientId"]) == -1:
self.downloadPatientSelector.addItem(items["patientId"])
if self.downloadPatientSelector.count == 0:
self.downloadPatientSelector.addItem("None")
self.downloadPatientSelector.model().sort(0)
self.downloadPatientSelector.setCurrentIndex(0)
def fillSelectorWithDescriptorPatients(self):
"""
Function used to fill the comboBox with the list of patients found in the collection descriptor
"""
directoryPath = self.managementFilepathSelector.directory
self.managementPatientSelector.clear()
if os.path.exists(os.path.join(directoryPath, '.DBIDescriptor')):
file = open(os.path.join(directoryPath, '.DBIDescriptor'), 'r')
collectionDescriptor = json.load(file)
patientList = list(collectionDescriptor["items"].keys())
self.managementPatientSelector.addItems(patientList)
else:
self.managementPatientSelector.addItem("None")
self.managementPatientSelector.model().sort(0)
self.managementPatientSelector.setCurrentIndex(0)
def fillExecutableSelector(self):
"""
Function used to fill the comboBox with the list of CLI executables available in the current Slicer instance
"""
self.modules = {}
modules = list(slicer.modules.__dict__.keys())
for moduleName in modules:
module = getattr(slicer.modules, moduleName)
if hasattr(module, "cliModuleLogic"):
self.executableSelector.addItem(module.name)
def isPossibleAddDate(self):
"""
Function used to enable the creation button if the path contains a descriptor and a patient is chosen
"""
directoryPath = self.managementFilepathSelector.directory
self.createButton.enabled = False
if self.managementPatientSelector.currentText != 'None' and os.path.exists(
os.path.join(directoryPath, '.DBIDescriptor')):
self.createButton.enabled = True
def onDownloadPatientChosen(self):
"""
Function used to enable the download button when everything is ok
"""
collectionName = self.downloadCollectionSelector.currentText
patientId = self.downloadPatientSelector.currentText
if collectionName != "None" and patientId != "None":
self.downloadButton.enabled = True
self.fillSelectorWithAttachments()
self.highlightDates()
def onUploadPatientChosen(self):
"""
Function used to show in a list the new documents for a patient
"""
self.uploadDateSelector.clear()
self.uploadLabel.hide()
if self.uploadPatientSelector.currentText != "" and self.uploadPatientSelector.currentText != "None":
for dates in self.attachmentsList[self.uploadPatientSelector.currentText].keys():
self.uploadDateSelector.addItem(dates)
if self.uploadDateSelector.count == 0:
self.uploadDateSelector.addItem("None")
def onUploadDateChosen(self):
"""
Function used to display the checkboxes corresponding to the selected patient and timepoint
"""
self.clearCheckBoxList()
# Display new attachments in the layout
if self.uploadDateSelector.currentText != "" and self.uploadDateSelector.currentText != "None":
timepoint = self.attachmentsList[self.uploadPatientSelector.currentText][self.uploadDateSelector.currentText]
for items in timepoint["items"]:
item = qt.QCheckBox(items)
self.uploadListLayout.addWidget(item)
timepoint["checkbox"][items] = item
if self.uploadListLayout.count() != 0:
self.uploadButton.enabled = True
else:
self.uploadButton.enabled = False
self.uploadListLayout.addWidget(self.noneLabel)
self.noneLabel.setText("None")
def executableSelected(self):
"""
Function used to display the widget corresponding to the CLI selected
"""
if hasattr(slicer.modules, self.executableSelector.currentText.lower()):
self.modulesLoaded[str(self.executableSelector.currentText.lower())] = getattr(slicer.modules, self.executableSelector.currentText.lower()).widgetRepresentation().apply
self.widgetSelectedGroupBox.show()
if self.currentExecutable:
self.layout.removeWidget(self.currentExecutable.widgetRepresentation())
self.currentExecutable.widgetRepresentation().hide()
self.currentExecutable = getattr(slicer.modules, self.executableSelector.currentText.lower())
self.widgetSelectedGroupBoxLayout.addWidget(self.currentExecutable.widgetRepresentation())
# Adjust height
self.currentExecutable.widgetRepresentation().setFixedHeight(350)
# Change Apply connection to send a remote task
applyButton = self.currentExecutable.widgetRepresentation().ApplyPushButton
self.currentExecutable.widgetRepresentation().show()
applyButton.disconnect(applyButton,'clicked()')
applyButton.connect('clicked(bool)', self.createJobFromModule)
def createJobFromModule(self):
"""
Function used to create and submit a job based on the CLI interface
"""
cli = {}
attachments = []
if self.currentExecutable.widgetRepresentation().currentCommandLineModuleNode():
node = self.currentExecutable.widgetRepresentation().currentCommandLineModuleNode()
executionServer = ""
if slicer.app.applicationName == "Slicer":
executionServer = "Slicer" + slicer.app.applicationVersion[:5]
else:
executionServer = slicer.app.applicationName
cli = {
"executable": self.executableSelector.currentText,
"parameters": [],
"inputs": [],
"outputs": [],
"type": "job",
"userEmail": self.DatabaseInteractorLib.getUserEmail(),
"executionserver": executionServer
}
for groupIndex in range(0, node.GetNumberOfParameterGroups()):
for parameterIndex in range(0, node.GetNumberOfParametersInGroup(groupIndex)):
if node.GetParameterLongFlag(groupIndex, parameterIndex):
flag = node.GetParameterLongFlag(groupIndex, parameterIndex)
if flag:
while flag[0] == "-":
flag = flag[1:]
flag = "--" + flag
else:
flag = node.GetParameterFlag(groupIndex, parameterIndex)
if flag:
while flag[0] == "-":
flag = flag[1:]
flag = "-" + flag
value = node.GetParameterAsString(node.GetParameterName(groupIndex, parameterIndex))
path = ''
tag = node.GetParameterTag(groupIndex, parameterIndex)
if tag == "image" or tag == "geometry" or tag == "transform":
# Write the file to a temporary location
IOnode = slicer.util.getNode(node.GetParameterAsString(node.GetParameterName(groupIndex, parameterIndex)))
if IOnode:
path = self.logic.nodeWriter(IOnode, slicer.app.temporaryPath)
value = os.path.basename(path)
if tag == "file":
path = node.GetParameterAsString(node.GetParameterName(groupIndex, parameterIndex))
value = os.path.basename(path)
if tag == "table" and node.GetParameterType(groupIndex, parameterIndex) == "color":
i = 1
while parameterIndex - i >= 0 :
if node.GetParameterType(groupIndex, parameterIndex-i) == 'label':
labelNode = slicer.util.getNode(node.GetParameterAsString(node.GetParameterName(groupIndex, parameterIndex-i)))
path = self.logic.nodeWriter(labelNode.GetDisplayNode().GetColorNode(), slicer.app.temporaryPath)
value = os.path.basename(path)
i = 100
i += 1
channel = node.GetParameterChannel(groupIndex, parameterIndex)
if channel and path:
if channel == "input":
attachments.append(path)
cli["inputs"].append({
"name": value
})
else:
if os.path.isdir(path) or os.path.basename(path).find('.') == -1:
type = 'directory'
else:
type = 'file'
cli["outputs"].append({
"type": type,
"name": value
})
cli['parameters'].append({
"flag": flag,
"name": value
# "name": node.GetParameterName(groupIndex, parameterIndex),
# "value": value,
# "type": node.GetParameterTag(groupIndex, parameterIndex)
})
import pprint
pprint.pprint(cli)
if not self.ClusterpostLib.server:
self.ClusterpostLib.setServerUrl(self.DatabaseInteractorLib.server[:-1])
self.ClusterpostLib.createAndSubmitJob(cli, attachments)
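# Shape of the job document built above (editor's sketch; the values are illustrative,
# only the keys mirror the cli dictionary constructed in this function):
#   {"executable": "cksum", "type": "job", "executionserver": "Slicer4.8.1",
#    "userEmail": "user@example.com",
#    "parameters": [{"flag": "-i", "name": "input.nrrd"}],
#    "inputs":  [{"name": "input.nrrd"}],
#    "outputs": [{"type": "file", "name": "output.vtk"}]}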
# ----------- Calendars ----------- #
def fillSelectorWithAttachments(self):
"""
Function used to fill a comboBox with attachments retrieved by queries
"""
self.downloadAttachmentSelector.clear()
self.downloadErrorText.hide()
self.downloadButton.enabled = True
attachmentName = ""
if self.downloadRadioButtonPatientOnly.isChecked():
for items in self.morphologicalData:
if items["patientId"] == self.downloadPatientSelector.currentText:
if "_attachments" in items:
for attachmentName in items["_attachments"].keys():
self.downloadAttachmentSelector.addItem(attachmentName)
else:
for items in self.morphologicalData:
# Check if the date is the same
if not "date" in items:
items["date"] = "NoDate"
if items["patientId"] == self.downloadPatientSelector.currentText and items["date"][:10] == str(
self.downloadDate.selectedDate):
if "_attachments" in items:
for attachmentName in items["_attachments"].keys():
self.downloadAttachmentSelector.addItem(attachmentName)
if attachmentName == "":
self.downloadAttachmentSelector.addItem("None")
self.downloadErrorText.show()
self.downloadButton.enabled = False
# ------ Filepath selectors ------- #
def createFilesDictionary(self):
"""
Function used to create a dictionary corresponding to the collection architecture
"""
directoryPath = self.uploadFilepathSelector.directory
# Check if the directory selected is a valid collection
if not os.path.exists(os.path.join(directoryPath, '.DBIDescriptor')):
self.clearCheckBoxList()
self.attachmentsList = {}
self.uploadPatientSelector.clear()
self.uploadPatientSelector.addItem("None")
self.uploadDateSelector.clear()
self.uploadDateSelector.addItem("None")
return
self.clearCheckBoxList()
self.attachmentsList = {}
# Iterate over patients
for folderName in os.listdir(directoryPath):
if folderName[0] != '.':
self.attachmentsList[folderName] = {}
# Iterate over dates
for dates in os.listdir(os.path.join(directoryPath, folderName)):
if dates[0] != '.':
self.attachmentsList[folderName][dates] = {}
self.attachmentsList[folderName][dates]["items"] = []
self.attachmentsList[folderName][dates]["checkbox"] = {}
# Fill with attachment names
for files in os.listdir(os.path.join(directoryPath, folderName, dates)):
if files[0] != ".":
self.attachmentsList[folderName][dates]["items"].append(files)
# Fill the patient selector comboBox with patients with changes
self.uploadPatientSelector.clear()
self.uploadPatientSelector.addItems(self.attachmentsList.keys())
if self.uploadPatientSelector.count != 0:
self.uploadButton.enabled = True
else:
self.uploadButton.enabled = False
self.uploadPatientSelector.addItem("None")
def onManagementDirectorySelected(self):
"""
Function used to choose which update to run depending on the checked management action
"""
if self.managementRadioButtonPatient.isChecked():
self.isPossibleCreatePatient()
else:
self.fillSelectorWithDescriptorPatients()
self.isPossibleAddDate()
# ---------------------------------------------------- #
# ------------------ Other functions ----------------- #
# ---------------------------------------------------- #
def clearCheckBoxList(self):
"""
Function used to clear the layout which displays the checkboxes for upload
"""
for patient in self.attachmentsList.keys():
for date in self.attachmentsList[patient].keys():
for items in self.attachmentsList[patient][date]["checkbox"].keys():
self.uploadListLayout.removeWidget(self.attachmentsList[patient][date]["checkbox"][items])
self.attachmentsList[patient][date]["checkbox"][items].delete()
self.attachmentsList[patient][date]["checkbox"] = {}
self.noneLabel.setText("")
self.uploadListLayout.removeWidget(self.noneLabel)
def checkBoxesChecked(self):
"""
Function used to store in a dictionary the attachments selected to be uploaded
"""
self.checkedList = {}
for patient in self.attachmentsList.keys():
self.checkedList[patient] = {}
for date in self.attachmentsList[patient].keys():
self.checkedList[patient][date] = {}
self.checkedList[patient][date]["items"] = []
for items in self.attachmentsList[patient][date]["checkbox"].keys():
if str(self.attachmentsList[patient][date]["checkbox"][items].checkState()) == "2" :
self.checkedList[patient][date]["items"].append(items)
def highlightDates(self):
"""
Function used to color the dates which contain one or multiple attachments for a given patientId
"""
for items in self.morphologicalData:
if "date" in items:
date = items["date"]
if items["patientId"] == self.downloadPatientSelector.currentText:
date = items["date"]
self.downloadDate.setDateTextFormat(qt.QDate(int(date[:4]),int(date[5:7]),int(date[8:10])),
self.checkableDateFormat)
else:
self.downloadDate.setDateTextFormat(qt.QDate(int(date[:4]), int(date[5:7]), int(date[8:10])),
self.normalDateFormat)
def overflow(self):
"""
Function triggered by the overflow of the timer
"""
self.timer.stop()
print(">>>>>>>>>>>>>>>>")
# Retrieve the jobs to be computed in the database
jobs = self.ClusterpostLib.getJobs(jobstatus='QUEUE')
if jobs:
self.runJob(jobs[0])
self.timer.start(self.timerPeriod)
def runJob(self, job):
"""
Function used to run a job and send it back to the server
"""
# Creates a folder to store IO for the job. Folder is in Slicer temporary path and named with job id
jobpath = os.path.join(slicer.app.temporaryPath, job["_id"])
if os.path.exists(jobpath):
shutil.rmtree(jobpath)
os.makedirs(jobpath)
# Check if the executable needed is in the current instance of Slicer
if hasattr(slicer.modules, job["executable"].lower()):
executableNode = getattr(slicer.modules, job["executable"].lower())
command = list()
# Depending on the Slicer version, the executable path points to either the library or the executable
if os.path.basename(executableNode.path).find('.') == -1:
command.append(executableNode.path)
else:
command.append(os.path.join(os.path.dirname(executableNode.path), executableNode.name))
# Parse all the parameters to create the CLI command
for parameter in job["parameters"]:
if not parameter["name"] == "" and not parameter["flag"] == "":
if parameter["name"] == "true":
command.append(parameter["flag"])
elif not parameter["name"] == "false":
command.append(parameter["flag"])
command.append(parameter["name"])
elif parameter["flag"] == "":
command.append(parameter["name"])
self.ClusterpostLib.updateJobStatus(job["_id"], "DOWNLOADING")
# Downloads the attachments and store them in the previously created folder
for attachment in job["_attachments"]:
self.ClusterpostLib.getAttachment(job["_id"], attachment, os.path.join(jobpath, attachment))
if attachment in command:
i = command.index(attachment)
command[i] = os.path.join(jobpath, attachment)
# This value is set to True if there is at least one output with type directory
directory = False
# Formatting the output content (file/directory)
for output in job["outputs"]:
if output['type'] == 'directory':
if os.path.basename(output["name"]):
if output["name"] in command:
i = command.index(output["name"])
command[i] = os.path.join(jobpath, os.path.basename(output["name"]))
folderName = os.path.basename(output["name"])
else:
if output["name"] in command:
i = command.index(output["name"])
command[i] = os.path.join(jobpath)
folderName = os.path.basename(os.path.dirname(output["name"]))
directory = True
else:
file = open(os.path.join(jobpath, output["name"]), 'w+')
file.close()
if output["name"] in command:
i = command.index(output["name"])
command[i] = os.path.join(jobpath, output["name"])
# Check the files before computation
filesBeforeComputation = os.listdir(jobpath)
print(command)
self.ClusterpostLib.updateJobStatus(job["_id"], "RUN")
try:
# subprocess runs the CLI as an external process from Python
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
self.displayConsole.append(out)
self.displayConsole.append(err)
# Check files differences before and after computation for output with type directory
filesAfterComputation = os.listdir(jobpath)
filesDifference = list(set(filesAfterComputation) - set(filesBeforeComputation))
self.ClusterpostLib.updateJobStatus(job["_id"], "UPLOADING")
# Upload stdout and stderr
with open(os.path.join(jobpath, 'stdout.out'), 'w') as f:
f.write(out)
with open(os.path.join(jobpath, 'stderr.err'), 'w') as f:
f.write(err)
self.ClusterpostLib.addAttachment(job["_id"], os.path.join(jobpath, 'stdout.out'))
self.ClusterpostLib.addAttachment(job["_id"], os.path.join(jobpath, 'stderr.err'))
outputSize = []
for output in job["outputs"]:
if output['type'] == 'file':
self.ClusterpostLib.addAttachment(job["_id"],
os.path.join(jobpath, output["name"]))
outputSize.append(os.stat(os.path.join(jobpath, output["name"])).st_size)
# Add all the new files in a zip file
if not directory:
folderName = "computationFiles"
print(filesDifference)
if len(filesDifference) > 0:
with zipfile.ZipFile(os.path.join(jobpath, folderName + '.zip'), 'w') as myzip:
for file in filesDifference:
myzip.write(os.path.join(jobpath, file), file)
self.ClusterpostLib.addAttachment(job["_id"],
os.path.join(jobpath, folderName + '.zip'))
outputSize.append(os.stat(os.path.join(jobpath, folderName + '.zip')).st_size)
# Check if the output file is not empty
if 0 in outputSize:
self.ClusterpostLib.updateJobStatus(job["_id"], "FAIL")
else:
self.ClusterpostLib.updateJobStatus(job["_id"], "DONE")
except Exception as e:
with open(os.path.join(jobpath, 'stderr.err'), 'w') as f:
f.write(str(e))
self.ClusterpostLib.addAttachment(job["_id"], os.path.join(jobpath, 'stderr.err'))
self.ClusterpostLib.updateJobStatus(job["_id"], "FAIL")
#
# DatabaseInteractorLogic
#
class DatabaseInteractorLogic(slicer.ScriptedLoadableModule.ScriptedLoadableModuleLogic):
def fileLoader(self, filepath):
"""
Function used to load a file in the node corresponding to the file extension
Documentation : http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.5/SlicerApplication/SupportedDataFormat
"""
sceneExtensions = ["mrml","mrb","xml","xcat"]
volumeExtensions = ["dcm","nrrd","nhdr","mhd","mha","vtk","hdr","img","nia","nii","bmp","pic","mask","gipl","jpg","jpeg","lsm","png","spr","tif","tiff","mgz","mrc","rec"]
modelExtensions = ["vtk","vtp","stl","obj","orig","inflated","sphere","white","smoothwm","pial","g","byu"]
fiducialExtensions = ["fcsv","txt"]
rulerExtensions = ["acsv","txt"]
transformExtensions = ["tfm","mat","txt","nrrd","nhdr","mha","mhd","nii"]
volumeRenderingExtensions = ["vp","txt"]
colorsExtensions = ["ctbl","txt"]
extension = ""
if filepath.rfind(".") != -1:
extension = filepath[filepath.rfind(".") + 1:]
if extension == "gz":
extension = filepath[filepath[:filepath.rfind(".")].rfind(".") + 1:filepath.rfind(".")]
if extension in sceneExtensions:
slicer.util.loadScene(filepath)
if extension in volumeExtensions:
slicer.util.loadVolume(filepath)
if extension in modelExtensions:
slicer.util.loadModel(filepath)
if extension in fiducialExtensions:
if not slicer.util.loadFiducialList(filepath):
if not slicer.util.loadAnnotationFiducial(filepath):
slicer.util.loadNodeFromFile(filepath)
# if extension in rulerExtensions:
if extension in transformExtensions:
slicer.util.loadTransform(filepath)
# if extension in volumeRenderingExtensions:
if extension in colorsExtensions:
slicer.util.loadColorTable(filepath)
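# Usage sketch (editor's addition; the paths are hypothetical):
#   logic = DatabaseInteractorLogic()
#   logic.fileLoader("/tmp/PatientTest/2016-01-01/T1.nrrd")      # dispatched to slicer.util.loadVolume
#   logic.fileLoader("/tmp/PatientTest/2016-01-01/surface.stl")  # dispatched to slicer.util.loadModel
# Note that an ambiguous extension such as ".vtk" appears in both the volume and the
# model lists above, so both loaders are attempted for such files.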
def nodeWriter(self, node, dirpath):
"""
Function used to write the content of a node in a file given the node and the path
"""
fileName = node.GetName()
# Window UI to choose file extension
extensionBox = qt.QDialog()
extensionBox.setWindowTitle("Select file extension")
extensionBoxLayout = qt.QFormLayout()
extensionComboBox = qt.QComboBox()
extensionBoxLayout.addRow(fileName, extensionComboBox)
buttonBox = qt.QDialogButtonBox(qt.QDialogButtonBox.Ok)
extensionBoxLayout.addWidget(buttonBox)
extensionBox.setLayout(extensionBoxLayout)
buttonBox.accepted.connect(extensionBox.accept)
buttonBox.rejected.connect(extensionBox.reject)
if "LabelMap" in node.GetClassName() or "ScalarVolume" in node.GetClassName():
extensionComboBox.addItems([".nrrd",".nii",".gipl.gz"])
extensionBox.exec_()
extension = extensionComboBox.currentText
if "ColorTable" in node.GetClassName():
extension = '.txt'
if "ModelHierarchy" in node.GetClassName():
extension = '.mrml'
if "ModelNode" in node.GetClassName():
extensionComboBox.addItems([".vtk", ".stl", ".obj"])
extensionBox.exec_()
extension = extensionComboBox.currentText
if "Transform" in node.GetClassName():
extension = ".mat"
write = slicer.util.saveNode(node,os.path.join(dirpath,fileName + extension))
if not write and extension == ".mrml":
slicer.util.saveScene(os.path.join(dirpath, fileName + extension))
return os.path.join(dirpath,fileName + extension)
class DatabaseInteractorTest(slicer.ScriptedLoadableModule.ScriptedLoadableModuleTest):
def setUp(self):
"""
Function used to reset the scene for the tests
"""
self.widget = slicer.modules.DatabaseInteractorWidget
self.DatabaseInteractorLib = self.widget.DatabaseInteractorLib
slicer.mrmlScene.Clear(0)
self.DatabaseInteractorLib.disconnect()
def runTest(self):
"""
Function used to run some tests about the extension behaviour
"""
self.runTestDbInteractor()
self.runTestClusterpost()
# Run the tests
def runTestDbInteractor(self):
self.setUp()
self.delayDisplay(' Starting tests ')
self.delayDisplay(' Test Login Function ')
self.assertTrue(self.test_Login())
self.delayDisplay(' Test createCollection Function ')
self.assertTrue(self.test_createCollection())
self.delayDisplay(' Test getCollection Function ')
self.assertTrue(self.test_getCollection())
self.delayDisplay(' Test createPatient Function ')
self.assertTrue(self.test_createPatient())
self.delayDisplay(' Test importData Function ')
self.assertTrue(self.test_importData())
self.delayDisplay(' Test uploadAttachment Function ')
self.assertTrue(self.test_uploadAttachment())
self.delayDisplay(' Test getAttachment Function ')
self.assertTrue(self.test_getAttachment())
self.delayDisplay(' Test deletePatient Function ')
self.assertTrue(self.test_deletePatient())
self.delayDisplay(' Test deleteCollection Function ')
self.assertTrue(self.test_deleteCollection())
self.delayDisplay(' Tests Passed! ')
def test_Login(self):
""" ----------------------------------------------------------------
---------------------- Login to the server ---------------------
---------------------------------------------------------------- """
server = 'http://localhost:8180/'
user = 'clement.mirabel@gmail.com'
password = 'Password1234'
self.delayDisplay('Attempting to connect to %s.' % (server))
self.DatabaseInteractorLib.setServer(server, slicer.app.temporaryPath + '/user.slicer_server')
token,error = self.DatabaseInteractorLib.connect(user,password)
if token == -1:
print("Connection Failed : " + error)
return False
print("Connection Passed !")
return True
def test_createCollection(self):
""" ----------------------------------------------------------------
------------------ Creating a test collection ------------------
---------------------------------------------------------------- """
data = {"items": "[]",
"type": "morphologicalDataCollection",
"name": "CollectionTest"}
rep = self.DatabaseInteractorLib.createMorphologicalDataCollection(data)
if rep == -1:
print("Collection creation Failed!")
return False
print("Collection creation Passed!")
return True
def test_getCollection(self):
""" ----------------------------------------------------------------
------------------ Getting the test collection -----------------
---------------------------------------------------------------- """
rep = self.DatabaseInteractorLib.getMorphologicalDataCollections()
for items in rep.json():
if items["name"]=="CollectionTest":
self.collectionTestId = items["_id"]
print("Getting collection Passed!")
return True
print("Getting collection Failed!")
return False
def test_createPatient(self):
""" ----------------------------------------------------------------
---------------------- Creating a patient ----------------------
---------------------------------------------------------------- """
data = {"type": "morphologicalData", "patientId": "PatientTest"}
rep = self.DatabaseInteractorLib.createMorphologicalData(data)
if rep == -1:
print("Patient creation Failed!")
return False
self.patientId = rep.json()["id"]
rep = self.DatabaseInteractorLib.getMorphologicalDataCollection(self.collectionTestId).json()
rep["items"].append({'_id': self.patientId})
upd = self.DatabaseInteractorLib.updateMorphologicalDataCollection(json.dumps(rep))
if upd == -1:
print("Patient creation Failed!")
return False
print("Patient creation Passed!")
return True
def test_uploadAttachment(self):
""" ----------------------------------------------------------------
-------------------- Uploading an attachment -------------------
---------------------------------------------------------------- """
self.moduleName = 'DatabaseInteractor'
filePath = slicer.app.temporaryPath + '/FA.nrrd'
file = open(filePath,'rb')
data = file.read()
rep = self.DatabaseInteractorLib.addAttachment(self.patientId,'attachmentTest.nrrd',data)
if rep == -1:
print("Attachment upload Failed!")
return False
print("Attachment upload Passed!")
return True
def test_getAttachment(self):
""" ----------------------------------------------------------------
-------------------- Getting an attachment -------------------
---------------------------------------------------------------- """
rep = self.DatabaseInteractorLib.getAttachment(self.patientId,'attachmentTest.nrrd', 'blob')
if rep == -1:
print("Getting attachment Failed!")
return False
filePath = slicer.app.temporaryPath + '/attachmentTest.nrrd'
with open(filePath, 'wb+') as fd:
for chunk in rep.iter_content(2048):
fd.write(chunk)
slicer.util.loadVolume(filePath)
print("Getting attachment Passed!")
return True
def test_deletePatient(self):
""" ----------------------------------------------------------------
-------------------- Delete the test patient -------------------
---------------------------------------------------------------- """
rep = self.DatabaseInteractorLib.deleteMorphologicalData(self.patientId)
if rep == -1:
print("Patient deletion Failed!")
return False
print("Patient deletion Passed!")
return True
def test_deleteCollection(self):
""" ----------------------------------------------------------------
------------------ Delete the test collection ------------------
---------------------------------------------------------------- """
rep = self.DatabaseInteractorLib.deleteMorphologicalDataCollection(self.collectionTestId)
if rep == -1:
print("Collection deletion Failed!")
return False
print("Collection deletion Passed!")
return True
def test_importData(self):
""" ----------------------------------------------------------------
------------------- Download some data online ------------------
---------------------------------------------------------------- """
import urllib.request, urllib.parse, urllib.error
downloads = (
('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd'),
)
for url, name in downloads:
filePath = os.path.join(slicer.app.temporaryPath, name)
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.request.urlretrieve(url, filePath)
return True
def runTestClusterpost(self):
import ClusterpostLib
import urllib.request, urllib.parse, urllib.error
self.testfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../DatabaseInteractor.png")
self.setUp()
self.clusterpost = ClusterpostLib.ClusterpostLib()
self.clusterpost.setServerUrl("http://localhost:8180")
self.delayDisplay(' Starting tests ')
self.assertTrue(self.testClusterpostLogin())
self.assertTrue(self.testGetExecutionServers())
self.assertTrue(self.testCreateJob())
self.assertTrue(self.testAddAttachment())
self.assertTrue(self.testExecuteJob())
self.assertTrue(self.testGetJob())
self.assertTrue(self.testGetJobs())
self.assertTrue(self.testGetDocumentAttachment())
self.assertTrue(self.testCreateAndSubmitJob())
self.assertTrue(self.testGetJobsDone())
def testClusterpostLogin(self):
self.clusterpost.userLogin({
"email": "algiedi85@gmail.com",
"password": "123Algiedi!"
})
return True
def testGetExecutionServers(self):
servers = self.clusterpost.getExecutionServers()
print(servers)
self.executionserver = servers[0]["name"]
return True
def testCreateJob(self):
job = {
"executable": "cksum",
"parameters": [
{
"flag": "",
"name": "DatabaseInteractor.png"
}
],
"inputs": [
{
"name": "DatabaseInteractor.png"
}
],
"outputs": [
{
"type": "directory",
"name": "./"
},
{
"type": "tar.gz",
"name": "./"
},
{
"type": "file",
"name": "stdout.out"
},
{
"type": "file",
"name": "stderr.err"
}
],
"type": "job",
"userEmail": "algiedi85@gmail.com",
"executionserver": self.executionserver
}
res = self.clusterpost.createJob(job)
self.jobid = res["id"]
return True
def testAddAttachment(self):
res = self.clusterpost.addAttachment(self.jobid, self.testfile)
return True
def testExecuteJob(self):
res = self.clusterpost.executeJob(self.jobid)
return True
def testGetJob(self):
res = self.clusterpost.getJob(self.jobid)
return True
def testGetJobs(self):
res = self.clusterpost.getJobs("cksum")
return True
def testGetDocumentAttachment(self):
res = self.clusterpost.getAttachment(self.jobid, self.testfile, "/tmp/out.png", "blob")
return True
def testCreateAndSubmitJob(self):
job = {
"executable": "cksum",
"parameters": [
{
"flag": "",
"name": "DatabaseInteractor.png"
}
],
"inputs": [
{
"name": "DatabaseInteractor.png"
}
],
"outputs": [
{
"type": "directory",
"name": "./"
},
{
"type": "tar.gz",
"name": "./"
},
{
"type": "file",
"name": "stdout.out"
},
{
"type": "file",
"name": "stderr.err"
}
],
"type": "job",
"userEmail": "algiedi85@gmail.com",
"executionserver": self.executionserver
}
files = [self.testfile]
res = self.clusterpost.createAndSubmitJob(job, [self.testfile])
return True
def testGetJobsDone(self):
outdir = "/tmp/"
self.clusterpost.getJobsDone(outdir)
return True
|
ClementMirabel/DatabaseInteractorExtension
|
DatabaseInteractor/DatabaseInteractor.py
|
Python
|
apache-2.0
| 79,849
|
[
"VTK"
] |
d2928ce17b3f1b2eaf944f47145a3ef28f02008d2a824782cdf4838dee044ee8
|
from __future__ import print_function
import h2o
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
def stackedensemble_gaussian():
#
# australia.csv: Gaussian
#
australia_hex = h2o.import_file(path=pyunit_utils.locate("smalldata/extdata/australia.csv"),
destination_frame="australia.hex")
myX = ["premax", "salmax", "minairtemp", "maxairtemp", "maxsst", "maxsoilmoist", "Max_czcs"]
# myXSmaller = ["premax", "salmax","minairtemp", "maxairtemp", "maxsst", "maxsoilmoist"]
# dependent = "runoffnew"
my_gbm = H2OGradientBoostingEstimator(ntrees=10, max_depth=3, min_rows=2, learn_rate=0.2, nfolds=5,
fold_assignment="Modulo", keep_cross_validation_predictions=True,
distribution="AUTO")
my_gbm.train(y="runoffnew", x=myX, training_frame=australia_hex)
print("GBM performance: ")
my_gbm.model_performance(australia_hex).show()
my_rf = H2ORandomForestEstimator(ntrees=10, max_depth=3, min_rows=2, nfolds=5,
fold_assignment="Modulo", keep_cross_validation_predictions=True)
my_rf.train(y="runoffnew", x=myX, training_frame=australia_hex)
print("RF performance: ")
my_rf.model_performance(australia_hex).show()
my_dl = H2ODeepLearningEstimator(nfolds=5, fold_assignment="Modulo", keep_cross_validation_predictions=True)
my_dl.train(y="runoffnew", x=myX, training_frame=australia_hex)
print("DL performance: ")
my_dl.model_performance(australia_hex).show()
# NOTE: don't specify family
my_glm = H2OGeneralizedLinearEstimator(nfolds=5, fold_assignment="Modulo",
keep_cross_validation_predictions=True)
my_glm.train(y="runoffnew", training_frame=australia_hex)
# my_glm.train(y = "runoffnew", x = myX, training_frame = australia_hex)
# my_glm.train(y = "runoffnew", x = myXSmaller, training_frame = australia_hex) # test parameter error-checking
print("GLM performance: ")
my_glm.model_performance(australia_hex).show()
stack = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id, my_glm.model_id])
stack.train(model_id="my_ensemble", x=myX, y="runoffnew", training_frame=australia_hex)
# test ignore_columns parameter checking
# stack.train(model_id="my_ensemble", y="runoffnew", training_frame=australia_hex, ignored_columns=["premax"])
predictions = stack.predict(australia_hex) # training data
print("Predictions for australia ensemble are in: " + predictions.frame_id)
#
# ecology.csv: Gaussian
#
ecology_train = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"), destination_frame="ecology_train")
myX = ["SegSumT", "SegTSeas", "SegLowFlow", "DSDist", "DSMaxSlope", "USAvgT", "USRainDays", "USSlope", "USNative", "DSDam", "Method", "LocSed"]
# myXSmaller = ["SegSumT", "SegTSeas", "SegLowFlow"]
my_gbm = H2OGradientBoostingEstimator(ntrees = 10, max_depth = 3, min_rows = 2, learn_rate = 0.2, nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True, distribution = "AUTO")
my_gbm.train(y = "Angaus", x = myX, training_frame = ecology_train)
print("GBM performance: ")
my_gbm.model_performance(ecology_train).show()
my_rf = H2ORandomForestEstimator(ntrees = 10, max_depth = 3, min_rows = 2, nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True)
my_rf.train(y = "Angaus", x = myX, training_frame = ecology_train)
print("RF performance: ")
my_rf.model_performance(ecology_train).show()
my_dl = H2ODeepLearningEstimator(nfolds=5, fold_assignment='Modulo', keep_cross_validation_predictions = True)
my_dl.train(y = "Angaus", x = myX, training_frame = ecology_train)
print("DL performance: ")
my_dl.model_performance(ecology_train).show()
# NOTE: don't specify family
my_glm = H2OGeneralizedLinearEstimator(nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True)
my_glm.train(y = "Angaus", x = myX, training_frame = ecology_train)
print("GLM performance: ")
my_glm.model_performance(ecology_train).show()
stack = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id, my_glm.model_id])
print("created H2OStackedEnsembleEstimator: " + str(stack))
stack.train(model_id="my_ensemble", y="Angaus", training_frame=ecology_train)
print("trained H2OStackedEnsembleEstimator: " + str(stack))
print("trained H2OStackedEnsembleEstimator via get_model: " + str(h2o.get_model("my_ensemble")))
predictions = stack.predict(ecology_train) # training data
print("predictions for ensemble are in: " + predictions.frame_id)
#
# insurance.csv: Poisson
#
insurance_train = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/insurance.csv"), destination_frame="insurance_train")
insurance_train["offset"] = insurance_train["Holders"].log()
myX = list(range(3))
my_gbm = H2OGradientBoostingEstimator(ntrees = 10, max_depth = 3, min_rows = 2, learn_rate = 0.2, nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True, distribution = 'poisson')
my_gbm.train(y = "Claims", x = myX, training_frame = insurance_train)
print("GBM performance: ")
my_gbm.model_performance(insurance_train).show()
my_rf = H2ORandomForestEstimator(ntrees = 10, max_depth = 3, min_rows = 2, nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True)
my_rf.train(y = "Claims", x = myX, training_frame = insurance_train)
print("RF performance: ")
my_rf.model_performance(insurance_train).show()
my_dl = H2ODeepLearningEstimator(nfolds=5, fold_assignment='Modulo', keep_cross_validation_predictions = True, distribution = 'poisson')
my_dl.train(y = "Claims", x = myX, training_frame = insurance_train)
print("DL performance: ")
my_dl.model_performance(insurance_train).show()
# NOTE: don't specify family
my_glm = H2OGeneralizedLinearEstimator(nfolds = 5, fold_assignment='Modulo', keep_cross_validation_predictions = True, family = 'poisson')
my_glm.train(y = "Claims", x = myX, training_frame = insurance_train)
print("GLM performance: ")
my_glm.model_performance(insurance_train).show()
stack = H2OStackedEnsembleEstimator(base_models=[my_gbm.model_id, my_rf.model_id, my_glm.model_id])
print("created H2OStackedEnsembleEstimator: " + str(stack))
stack.train(model_id="my_ensemble", y="Claims", training_frame=insurance_train)
print("trained H2OStackedEnsembleEstimator: " + str(stack))
print("metalearner: ")
print(h2o.get_model(stack.metalearner()['name']))
predictions = stack.predict(insurance_train) # training data
print("preditions for ensemble are in: " + predictions.frame_id)
if __name__ == "__main__":
pyunit_utils.standalone_test(stackedensemble_gaussian)
else:
stackedensemble_gaussian()
|
mathemage/h2o-3
|
h2o-py/tests/testdir_algos/stackedensemble/pyunit_stackedensemble_regression.py
|
Python
|
apache-2.0
| 7,468
|
[
"Gaussian"
] |
953e57e28434c2aa338979b8a0f09c3a34dcbdfd6ac120f3cef4f17e9317cf23
|
# Copyright (c) 2015,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Module implements the test cases specified in the performance test protocol
Created on Wed Apr 1 10:59:05 2015
@author: christoph.paulik@geo.tuwien.ac.at
'''
import os
import glob
from datetime import datetime
from smdc_perftests.performance_tests import test_cases
from smdc_perftests.datasets import esa_cci
from smdc_perftests.datasets import ascat
from smdc_perftests import helper
def run_performance_tests(name, dataset, save_dir,
gpi_list=None,
date_range_list=None,
cell_list=None,
cell_date_list=None,
gpi_read_perc=1.0,
date_read_perc=1.0,
cell_read_perc=1.0,
max_runtime_per_test=None,
repeats=1):
"""
Run a complete test suite on a dataset and store the results
in the specified directory
Parameters
----------
name: string
name of the test run, used for filenaming
dataset: dataset instance
instance implementing the get_timeseries,
get_avg_image and get_data methods.
save_dir: string
directory to store the test results in
gpi_list: list, optional
list of possible grid point indices, if given the
timeseries reading tests will be run
date_range_list: list, optional
list of possible dates, if given then the read_avg_image
and read_data tests will be run.
The format is a list of lists e.g.
[[datetime(2007,1,1), datetime(2007,1,1)], #reads one day
[datetime(2007,1,1), datetime(2007,12,31)]] # reads one year
cell_list: list, optional
list of possible cells to read from. if given then the read_data
test will be run
cell_date_list: list, optional
list of time intervals to read per cell. Should be as long as the
cell list or longer.
gpi_read_perc: float, optional
percentage of random selection from gpi_list read for each try
date_read_perc: float, optional
percentage of random selection from date_range_list read for each try
cell_read_perc: float, optional
percentage of random selection from cell_list read for each try
max_runtime_per_test: float, optional
maximum runtime per test in seconds, if given the tests will be aborted
after taking more than this time
repeats: int, optional
number of repeats for each measurement
"""
timed_dataset = test_cases.SelfTimingDataset(dataset)
timed_avg_img_dataset = test_cases.SelfTimingDataset(dataset)
if gpi_list is not None:
# test reading of time series by grid point/location id
test_name = '{}_test-rand-gpi'.format(name)
@test_cases.measure(test_name, runs=repeats)
def test_rand_gpi():
test_cases.read_rand_ts_by_gpi_list(timed_dataset, gpi_list,
read_perc=gpi_read_perc,
max_runtime=max_runtime_per_test)
results = test_rand_gpi()
results.to_nc(os.path.join(save_dir, test_name + ".nc"))
detailed_results = test_cases.TestResults(
timed_dataset.measurements['get_timeseries'],
name=test_name + "_detailed")
detailed_results.to_nc(
os.path.join(save_dir, test_name + "_detailed.nc"))
if date_range_list is not None:
# test reading of daily images, only start date is given
test_name = '{}_test-rand-daily-img'.format(name)
# make date list containing just the start dates for reading images
date_list = []
for d1, d2 in date_range_list:
date_list.append(d1)
@test_cases.measure(test_name, runs=repeats)
def test_rand_img():
test_cases.read_rand_img_by_date_list(timed_dataset, date_list,
read_perc=date_read_perc,
max_runtime=max_runtime_per_test)
results = test_rand_img()
results.to_nc(os.path.join(save_dir, test_name + ".nc"))
detailed_results = test_cases.TestResults(
timed_dataset.measurements['get_avg_image'],
name=test_name + "_detailed")
detailed_results.to_nc(
os.path.join(save_dir, test_name + "_detailed.nc"))
# test reading of averaged images
test_name = '{}_test-rand-avg-img'.format(name)
@test_cases.measure(test_name, runs=repeats)
def test_avg_img():
test_cases.read_rand_img_by_date_range(timed_avg_img_dataset, date_range_list,
read_perc=date_read_perc,
max_runtime=max_runtime_per_test)
results = test_avg_img()
results.to_nc(os.path.join(save_dir, test_name + ".nc"))
detailed_results = test_cases.TestResults(
timed_avg_img_dataset.measurements['get_avg_image'],
name=test_name + "_detailed")
detailed_results.to_nc(
os.path.join(save_dir, test_name + "_detailed.nc"))
if cell_list is not None and cell_date_list is not None:
# test reading of complete cells
test_name = '{}_test-rand-cells-data'.format(name)
@test_cases.measure(test_name, runs=repeats)
def test_read_cell_data():
test_cases.read_rand_cells_by_cell_list(timed_dataset, cell_date_list, cell_list,
read_perc=cell_read_perc,
max_runtime=max_runtime_per_test)
results = test_read_cell_data()
results.to_nc(os.path.join(save_dir, test_name + ".nc"))
detailed_results = test_cases.TestResults(
timed_dataset.measurements['get_data'],
name=test_name + "_detailed")
detailed_results.to_nc(
os.path.join(save_dir, test_name + "_detailed.nc"))
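# Minimal usage sketch (editor's addition; `my_dataset` and the paths are hypothetical,
# the dataset only has to expose the get_timeseries/get_avg_image/get_data methods
# described in the docstring above):
#   date_ranges = [[datetime(2007, 1, 1), datetime(2007, 1, 2)],
#                  [datetime(2007, 1, 1), datetime(2007, 12, 31)]]
#   run_performance_tests("example-run", my_dataset, "/tmp/results",
#                         gpi_list=range(10000), date_range_list=date_ranges,
#                         gpi_read_perc=0.5, date_read_perc=0.5, repeats=3)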
def run_esa_cci_netcdf_tests(test_dir, results_dir, variables=['sm']):
"""
function for running the ESA CCI netCDF performance tests
the tests will be run for all .nc files in the test_dir
Parameters
----------
test_dir: string
path to the test files
results_dir: string
path in which the results should be stored
variables: list
list of variables to read for the tests
"""
filelist = glob.glob(os.path.join(test_dir, "*.nc"))
for filen in filelist:
print "testing file", filen
dataset = esa_cci.ESACCI_netcdf(filen, variables=variables)
# get filename and use as name for test
name = os.path.splitext(os.path.split(filen)[1])[0]
# generate date list
date_range_list = helper.generate_date_list(
datetime(1980, 1, 1), datetime(2013, 12, 31), n=10000)
run_performance_tests(name, dataset, results_dir,
gpi_list=dataset.grid.land_ind,
date_range_list=date_range_list,
gpi_read_perc=0.1, repeats=1)
def run_esa_cci_tests(dataset, testname, results_dir, n_dates=10000,
date_read_perc=0.1, gpi_read_perc=0.1,
repeats=3, cell_read_perc=10.0,
max_runtime_per_test=None):
"""
Runs the ESA CCI tests given a dataset instance
Parameters
----------
dataset: Dataset instance
Instance of a Dataset class
testname: string
Name of the test, used for storing the results
results_dir: string
path in which to store the test results
n_dates: int, optional
number of dates to generate
date_read_perc: float, optional
percentage of random selection from date_range_list read for each try
gpi_read_perc: float, optional
percentage of random selection from gpi_list read for each try
repeats: int, optional
number of repeats of the tests
cell_read_perc: float, optional
percentage of random selection from the cell list read for each try
max_runtime_per_test: float, optional
maximum runtime per test in seconds, if given the tests will be aborted
after taking more than this time
"""
date_start = datetime(1980, 1, 1)
date_end = datetime(2013, 12, 31)
date_range_list = helper.generate_date_list(date_start, date_end, n=n_dates)
# test 500 "cells" with 500 months
cell_list=[0]*500
cell_date_list = helper.generate_date_list(date_start, date_end, n=len(cell_list))
grid = esa_cci.ESACCI_grid()
run_performance_tests(name=testname, dataset=dataset, save_dir=results_dir,
gpi_list=grid.land_ind,
date_range_list=date_range_list,
cell_list=cell_list,
cell_date_list=cell_date_list,
gpi_read_perc=gpi_read_perc,
date_read_perc=date_read_perc,
cell_read_perc=cell_read_perc,
max_runtime_per_test=max_runtime_per_test,
repeats=repeats)
def run_ascat_tests(dataset, testname, results_dir, n_dates=10000,
date_read_perc=0.1, gpi_read_perc=0.1, repeats=3,
cell_read_perc=10.0,
max_runtime_per_test=None):
"""
Runs the ASCAT tests given a dataset instance
Parameters
----------
dataset: Dataset instance
Instance of a Dataset class
testname: string
Name of the test, used for storing the results
results_dir: string
path in which to store the test results
n_dates: int, optional
number of dates to generate
date_read_perc: float, optional
percentage of random selection from date_range_list read for each try
gpi_read_perc: float, optional
percentage of random selection from gpi_list read for each try
repeats: int, optional
number of repeats of the tests
cell_read_perc: float, optional
percentage of random selection from the cell list read for each try
max_runtime_per_test: float, optional
maximum runtime per test in seconds, if given the tests will be aborted
after taking more than this time
"""
date_start = datetime(2007, 1, 1)
date_end = datetime(2013, 12, 31)
date_range_list = helper.generate_date_list(date_start, date_end, n=n_dates)
grid = ascat.ASCAT_grid()
cell_list=grid.get_cells()
cell_date_list=helper.generate_date_list(date_start, date_end, n=len(cell_list))
run_performance_tests(testname, dataset, results_dir,
gpi_list=grid.land_ind,
date_range_list=date_range_list,
date_read_perc=date_read_perc,
gpi_read_perc=gpi_read_perc,
cell_read_perc=cell_read_perc,
repeats=repeats,
cell_list=cell_list,
cell_date_list=cell_date_list,
max_runtime_per_test=max_runtime_per_test)
def run_equi7_tests(dataset, testname, results_dir, n_dates=10000,
date_read_perc=0.1, gpi_read_perc=0.1, repeats=3,
cell_read_perc=100.0,
max_runtime_per_test=None):
"""
Runs the ASAR/Sentinel 1 Equi7 tests given a dataset instance
Parameters
----------
dataset: Dataset instance
Instance of a Dataset class
testname: string
Name of the test, used for storing the results
results_dir: string
path in which to store the test results
n_dates: int, optional
number of dates to generate
date_read_perc: float, optional
percentage of random selection from date_range_list read for each try
gpi_read_perc: float, optional
percentage of random selection from gpi_list read for each try
repeats: int, optional
number of repeats of the tests
cell_read_perc: float, optional
percentage of random selection from the cell list read for each try
max_runtime_per_test: float, optional
maximum runtime per test in seconds, if given the tests will be aborted
after taking more than this time
"""
date_start = datetime(2015, 1, 8)
date_end = datetime(2015, 2, 18)
date_range_list = helper.generate_date_list(date_start, date_end, n=n_dates,
max_spread=5, min_spread=5)
gpi_list = range(2880000)
cell_list = range(2) * 50
cell_date_list=helper.generate_date_list(date_start, date_end, n=len(cell_list),
max_spread=5, min_spread=5)
run_performance_tests(testname, dataset, results_dir,
gpi_list=gpi_list,
date_range_list=date_range_list,
date_read_perc=date_read_perc,
gpi_read_perc=gpi_read_perc,
cell_read_perc=cell_read_perc,
repeats=repeats,
cell_list=cell_list,
cell_date_list=cell_date_list,
max_runtime_per_test=max_runtime_per_test)
if __name__ == '__main__':
path = os.path.join(
"/media", "sf_D", "SMDC", "performance_tests", "CCI_testdata")
run_esa_cci_netcdf_tests(
os.path.join(path, "compr-4"), os.path.join(path, "results"))
|
TUW-GEO/SMDC-performance
|
smdc_perftests/performance_tests/test_scripts.py
|
Python
|
bsd-3-clause
| 14,747
|
[
"NetCDF"
] |
cdc0b857c3378cd27ae6ba10da9fa3ab6bad113e6ca0a20fdc3383745a005f02
|
# Copyright (C) 2007 Alexandre Conrad, alexandre (dot) conrad (at) gmail (dot) com
# Copyright (C) 2009 Alexandre Bourget, alex@bourget.cc
#
# This module is part of FormAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from formalchemy import config
from sqlalchemy.orm import Query, class_mapper
from sqlalchemy.exceptions import InvalidRequestError # 0.4 support
import compiler
__all__ = ['stringify', 'normalized_options', '_pk', '_pk_one_column',
'simple_eval']
# see http://code.activestate.com/recipes/364469/ for explanation.
# 2.6 provides ast.literal_eval, but requiring 2.6 is a bit of a stretch for now
class _SafeEval(object):
def visit(self, node,**kw):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node, **kw)
def default(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
visitExpression = default
def visitName(self, node, **kw):
if node.name in ['True', 'False', 'None']:
return eval(node.name)
def visitConst(self, node, **kw):
return node.value
def visitTuple(self,node, **kw):
return tuple(self.visit(i) for i in node.nodes)
def visitList(self,node, **kw):
return [self.visit(i) for i in node.nodes]
def simple_eval(source):
"""like 2.6's ast.literal_eval, but only does constants, lists, and tuples, for serialized pk eval"""
if source == '':
return None
walker = _SafeEval()
ast = compiler.parse(source, 'eval')
return walker.visit(ast)
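# Example sketch (editor's addition): simple_eval only reconstructs constants, the names
# True/False/None, tuples and lists, which is enough for serialized primary keys:
#   simple_eval("(1, 'abc')")   -> (1, 'abc')
#   simple_eval("[1, 2, None]") -> [1, 2, None]
#   simple_eval("")             -> None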
def stringify(k, null_value=u''):
if k is None:
return null_value
if isinstance(k, str):
return unicode(k, config.encoding)
elif isinstance(k, unicode):
return k
elif hasattr(k, '__unicode__'):
return unicode(k)
else:
return unicode(str(k), config.encoding)
def _pk_one_column(instance, column):
try:
attr = getattr(instance, column.key)
except AttributeError:
# FIXME: this is not clean but the only way i've found to retrieve the
# real attribute name of the primary key.
# This is needed when you use something like:
# id = Column('UGLY_NAMED_ID', primary_key=True)
# It's a *really* needed feature
cls = instance.__class__
for k in instance._sa_class_manager.keys():
props = getattr(cls, k).property
if hasattr(props, 'columns'):
if props.columns[0] is column:
attr = getattr(instance, k)
break
return attr
def _pk(instance):
# Return the value of this instance's primary key, suitable for passing to Query.get().
# Will be a tuple if PK is multicolumn.
try:
columns = class_mapper(type(instance)).primary_key
except InvalidRequestError:
return None
if len(columns) == 1:
return _pk_one_column(instance, columns[0])
return tuple([_pk_one_column(instance, column) for column in columns])
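# Example sketch (editor's addition; `user` / `order_line` are hypothetical mapped instances):
#   _pk(user)        -> 42        # single-column primary key
#   _pk(order_line)  -> (42, 3)   # composite primary key, returned as a tuple
#   _pk(object())    -> None      # not mapped -> InvalidRequestError -> None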
def query_options(L):
"""
Return a list of tuples of `(item description, item pk)`
for each item in the iterable L, where `item description`
is the result of str(item) and `item pk` is the item's primary key.
"""
return [(stringify(item), _pk(item)) for item in L]
def normalized_options(options):
"""
If `options` is an SA query or an iterable of SA instances, it will be
turned into a list of `(item description, item value)` pairs. Otherwise, a
copy of the original options will be returned with no further validation.
"""
if isinstance(options, Query):
options = options.all()
if callable(options):
return options
i = iter(options)
try:
first = i.next()
except StopIteration:
return []
try:
class_mapper(type(first))
except:
return list(options)
return query_options(options)
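# Example sketch (editor's addition; `Session` and `User` are hypothetical):
#   normalized_options(Session.query(User))         -> [(u'alice', 1), (u'bob', 2)]
#   normalized_options([('alice', 1), ('bob', 2)])  -> [('alice', 1), ('bob', 2)]  (returned as-is)
#   normalized_options(lambda: [('a', 1)])          -> the callable itself, unchanged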
|
abourget/formalchemy-abourget
|
formalchemy/utils.py
|
Python
|
mit
| 4,067
|
[
"VisIt"
] |
acb596b98477daba4c6545db2e4ac532c7fe2d1f2f1a3ca847fbc63de0184c91
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 8 09:21:38 2016
@author: Felipe Leno
This file implements our advisor-advisee proposal.
This agent acts as SARSA, and the exploration strategy is changed according to our proposal
"""
from sarsatile import SARSATile
from threading import Thread
from advice_util import AdviceUtil
import random
from time import sleep
import math
import agent
import abc
class AdHoc(SARSATile):
budgetAsk = 0
budgetAdvise = 0
spentBudgetAsk = 0
spentBudgetAdvise = 0
scalingVisits = math.exp(10)
lastStatus = agent.IN_GAME
#Enum for importance metrics
VISIT_IMPORTANCE, Q_IMPORTANCE = range(2)
stateImportanceMetric = None
adviceObject = None
ASK,ADVISE = range(2)
visitTable = None
advisedState = None
informAction = None  # must be set in the subclass
def __init__(self, budgetAsk, budgetAdvise,stateImportanceMetric,seed=12345, port=12345,epsilon=0.1, alpha=0.1, gamma=0.9, decayRate=0.9, serverPath = "/home/leno/HFO/bin/"):
super(AdHoc, self).__init__(seed=seed,port=port,serverPath = serverPath)
self.name = "AdHoc"
self.visitTable = {}
self.advisedState = {}
self.budgetAsk = budgetAsk
self.budgetAdvise = budgetAdvise
self.stateImportanceMetric = stateImportanceMetric
def select_action(self, stateFeatures, state, noAdvice=False):
"""Changes the exploration strategy"""
if self.exploring and self.spentBudgetAsk < self.budgetAsk and stateFeatures[self.ABLE_KICK] == 1 and not noAdvice:
#Check if it should ask for advice
ask = self.check_ask(state)
if ask:
#----
#Ask for advice
#----
#In case the agent will communicate its intended action
if self.informAction:
normalAction = super(AdHoc, self).select_action(stateFeatures,state)
else:
normalAction = None
advised = self.adviceObject.ask_advice(self.get_Unum(),stateFeatures,normalAction)
if advised:
try:
self.advisedState[self.quantize_features(state)] = True
self.spentBudgetAsk = self.spentBudgetAsk + 1
action = self.combineAdvice(advised)
return action
except:
print "Exception when combining the advice " + str(advised)
#No need to compute two times the intended action
if self.informAction:
return normalAction
return super(AdHoc, self).select_action(stateFeatures,state,noAdvice)
@abc.abstractmethod
def check_advise(self,stateFeatures,state):
"""Returns if the agent should advice in this state.
The advised action is also returned in the positive case"""
#importance = self.state_importance(state,self.stateImportanceMetric)
#midpoint = self.midpoint(self.ADVISE)
#Calculates the probability
#prob = self.calc_prob_adv(importance,midpoint,self.ADVISE)
##
#processedState = self.quantize_features(state)
#numberVisits = self.number_visits(processedState)
#if importance>0:
#print str(importance)+" - "+str(prob)
##
#Check if the agent should advise
#if random.random() < prob and prob > 0.1:
#advisedAction = self.select_action(stateFeatures,state,True)
#return True,advisedAction
#return False,None
def combineAdvice(self,advised):
return int(max(set(advised), key=advised.count))
def state_importance(self,state,typeProb):
"""Calculates the state importance
state - the state
typeProb - is the state importance being calculated in regard to
the number of visits or also by Q-table values?"""
processedState = self.quantize_features(state)
numberVisits = self.number_visits(processedState)
if numberVisits == 0:
return 0.0
visitImportance = numberVisits / (numberVisits + math.log(self.scalingVisits + numberVisits))
if typeProb == self.VISIT_IMPORTANCE:
return visitImportance
elif typeProb==self.Q_IMPORTANCE:
maxQ = -float("inf")
minQ = float("inf")
#Get max and min Q value
actions = [self.DRIBBLE, self.SHOOT, self.PASSfar, self.PASSnear]
for act in actions:
if (processedState,act) in self.qTable:
actQ = self.qTable.get((processedState, act))
if actQ > maxQ:
maxQ = actQ
if actQ < minQ:
minQ = actQ
# print "MaxQ "+str(maxQ)
# print "MinQ "+str(minQ)
# print "len "+str(len(actions))
qImportance = math.fabs(maxQ - minQ) #* len(actions)
if qImportance==float('inf'):
return 0.0
#if qImportance != 0:
#print str(qImportance) + " - "+str(visitImportance)
return qImportance / (1-visitImportance)
#If the agent got here, it is an error
return None
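# Illustrative numbers (editor's addition, rounded): with scalingVisits = e**10, the
# visit-based importance numberVisits / (numberVisits + log(scalingVisits + numberVisits))
# saturates towards 1 as visits accumulate, e.g.
#   1 visit    -> ~0.09
#   10 visits  -> ~0.50
#   100 visits -> ~0.91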
def step(self, state, action):
"""Modifies the default step action just to include a state visit counter"""
if self.exploring:
processedState = self.quantize_features(state)
self.visitTable[processedState] = self.visitTable.get(processedState,0.0) + 1
status, statePrime, actionPrime = super(AdHoc, self).step(state,action)
self.lastStatus = status
if self.lastStatus != self.IN_GAME:
self.advisedState = {}
return status, statePrime, actionPrime
@abc.abstractmethod
def check_ask(self,state):
"""Returns if the agent should ask for advise in this state"""
#if self.exploring and not (self.quantize_features(state) in self.advisedState):
# importance = self.state_importance(state,self.VISIT_IMPORTANCE)
# midpoint = self.midpoint(self.ASK)
#Calculates the probability
# prob = self.calc_prob_adv(importance,midpoint,self.ASK)
##
#processedState = self.quantize_features(state)
#numberVisits = self.number_visits(processedState)
#print str(numberVisits)+" - "+str(prob)
##
# if random.random() < prob and prob > 0.1:
# return True
#return False
#Call default sarsa method if no action was selected
def calc_prob_adv(self,importance,midpoint,typeProb):
"""Calculates the probability of giving/receiving advice
importance - the current state importance
midpoint - the midpoint for the logistic function
typeProb - ASK or ADVISE
"""
signal = 1 if typeProb == self.ASK else -1
k = 10
prob = 1 / (1 + math.exp(signal * k * (importance-midpoint)))
return prob
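# Illustrative sketch (editor's addition, hypothetical values): the ask/advise probability
# is a logistic curve centred on `midpoint`, e.g. with k=10 and midpoint=0.5:
#   calc_prob_adv(0.2, 0.5, self.ASK)    -> ~0.95  (unfamiliar state, likely to ask)
#   calc_prob_adv(0.8, 0.5, self.ASK)    -> ~0.05  (well-visited state, rarely asks)
#   calc_prob_adv(0.8, 0.5, self.ADVISE) -> ~0.95  (confident state, likely to advise)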
def advise_action(self,uNum,state,adviseeAction=None):
"""Verifies if the agent can advice a friend, and return the action if possible"""
if self.spentBudgetAdvise < self.budgetAdvise:
#Check if the agent should advise
advise,advisedAction = self.check_advise(state,self.get_transformed_features(state))
if advise:
if adviseeAction is None or advisedAction!=adviseeAction:
self.spentBudgetAdvise = self.spentBudgetAdvise + 1
return advisedAction
return None
def setupAdvising(self,agentIndex,allAgents):
""" This method is called in preparation for advising """
self.adviceObject = AdviceUtil()
advisors = [x for i,x in enumerate(allAgents) if i!=agentIndex]
self.adviceObject.setupAdvisors(advisors)
def get_used_budget(self):
return self.spentBudgetAdvise
@abc.abstractmethod
def midpoint(self,typeMid):
"""Calculates the midpoint"""
pass
def number_visits(self,state):
return self.visitTable.get(state,0.0)
|
cowhi/HFO
|
experiments/agents/adhoc.py
|
Python
|
mit
| 8,788
|
[
"VisIt"
] |
1b5a91512b641bfae42bdf6d6e524a46467454899b3984cea42c52367fba7682
|
# $HeadURL$
__RCSID__ = "$Id$"
import os, stat, tempfile, shutil
from DIRAC import S_OK, S_ERROR, gConfig, rootPath, gLogger
import DIRAC.Core.Security.Locations as Locations
import DIRAC.Core.Security.File as File
from DIRAC.Core.Security.BaseSecurity import BaseSecurity
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities import List, Time, Os
class VOMS( BaseSecurity ):
def getVOMSAttributes( self, proxy, switch = "all" ):
"""
Return VOMS proxy attributes as list elements if switch="all" (default) OR
return the string prepared to be stored in DB if switch="db" OR
return the string of elements to be used as the option string in voms-proxy-init
if switch="option".
If a given proxy is a grid proxy, then the function will return an empty list.
"""
# Get all possible info from voms proxy
result = self.getVOMSProxyInfo( proxy, "all" )
if not result["OK"]:
return S_ERROR( 'Failed to extract info from proxy: %s' % result[ 'Message' ] )
vomsInfoOutput = List.fromChar( result["Value"], "\n" )
#Get a list of known VOMS attributes
validVOMSAttrs = []
result = gConfig.getOptions( "/Registry/VOMS/Mapping" )
if result[ 'OK' ]:
for group in result[ 'Value' ]:
vA = gConfig.getValue( "/Registry/VOMS/Mapping/%s" % group, "" )
if vA and vA not in validVOMSAttrs:
validVOMSAttrs.append( vA )
result = gConfig.getSections( "/Registry/Groups" )
if result[ 'OK' ]:
for group in result[ 'Value' ]:
vA = gConfig.getValue( "/Registry/Groups/%s/VOMSRole" % group, "" )
if vA and vA not in validVOMSAttrs:
validVOMSAttrs.append( vA )
# Parse output of voms-proxy-info command
attributes = []
voName = ''
nickName = ''
for line in vomsInfoOutput:
fields = List.fromChar( line, ":" )
key = fields[0]
value = " ".join( fields[1:] )
if key == "VO":
voName = value
elif key == "attribute":
# Cut off unsupported Capability selection part
if value.find( "nickname" ) == 0:
nickName = "=".join( List.fromChar( value, "=" )[ 1: ] )
else:
value = value.replace( "/Capability=NULL" , "" )
value = value.replace( "/Role=NULL" , "" )
if value and value not in attributes and value in validVOMSAttrs:
attributes.append( value )
# Sorting and joining attributes
if switch == "db":
returnValue = ":".join( attributes )
elif switch == "option":
if len( attributes ) > 1:
returnValue = voName + " -order " + ' -order '.join( attributes )
elif attributes:
returnValue = voName + ":" + attributes[0]
else:
returnValue = voName
elif switch == 'nickname':
returnValue = nickName
elif switch == 'all':
returnValue = attributes
return S_OK( returnValue )
def getVOMSProxyFQAN( self, proxy ):
""" Get the VOMS proxy fqan attributes
"""
return self.getVOMSProxyInfo( proxy, "fqan" )
def getVOMSProxyInfo( self, proxy, option = False ):
validOptions = ['actimeleft', 'timeleft', 'identity', 'fqan', 'all']
if option:
if option not in validOptions:
return S_ERROR( 'Non valid option %s' % option )
retVal = File.multiProxyArgument( proxy )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
try:
res = proxyDict[ 'chain' ].getVOMSData()
if not res[ 'OK' ]:
return res
data = res[ 'Value' ]
if option == 'actimeleft':
now = Time.dateTime()
left = data[ 'notAfter' ] - now
return S_OK( "%d\n" % left.total_seconds() )
if option == "timeleft":
now = Time.dateTime()
left = proxyDict[ 'chain' ].getNotAfterDate()[ 'Value' ] - now
return S_OK( "%d\n" % left.total_seconds() )
if option == "identity":
return S_OK( "%s\n" % data[ 'subject' ] )
if option == "fqan":
return S_OK( "\n".join( [ f.replace( "/Role=NULL", "" ).replace( "/Capability=NULL", "" ) for f in data[ 'fqan' ] ] ) )
if option == "all":
lines = []
creds = proxyDict[ 'chain' ].getCredentials()[ 'Value' ]
lines.append( "subject : %s" % creds[ 'subject' ] )
lines.append( "issuer : %s" % creds[ 'issuer' ] )
lines.append( "identity : %s" % creds[ 'identity' ] )
if proxyDict[ 'chain' ].isRFC():
lines.append( "type : RFC compliant proxy" )
else:
lines.append( "type : proxy" )
left = creds[ 'secondsLeft' ]
h = int( left / 3600 )
m = int( left / 60 ) - h * 60
s = int( left ) - m * 60 - h * 3600
lines.append( "timeleft : %s:%s:%s\nkey usage : Digital Signature, Key Encipherment, Data Encipherment" % ( h, m, s ) )
lines.append( "== VO %s extension information ==" % data[ 'vo' ] )
lines.append( "VO: %s" % data[ 'vo' ] )
lines.append( "subject : %s" % data[ 'subject' ] )
lines.append( "issuer : %s" % data[ 'issuer' ] )
for fqan in data[ 'fqan' ]:
lines.append( "attribute : %s" % fqan )
lines.append( "attribute : %s" % data[ 'attribute' ] )
now = Time.dateTime()
left = ( data[ 'notAfter' ] - now ).total_seconds()
h = int( left / 3600 )
m = int( left / 60 ) - h * 60
s = int( left ) - m * 60 - h * 3600
lines.append( "timeleft : %s:%s:%s" % ( h, m , s ) )
return S_OK( "\n".join( lines ) )
else:
return S_ERROR( "NOT IMP" )
finally:
if proxyDict[ 'tempFile' ]:
self._unlinkFiles( proxyDict[ 'tempFile' ] )
def OLDgetVOMSProxyInfo( self, proxy, option = False ):
""" Returns information about a proxy certificate (both grid and voms).
Available information is:
1. Full (grid)voms-proxy-info output
2. Proxy Certificate Timeleft in seconds (the output is an int)
3. DN
4. voms group (if any)
@type proxy: a string
@param proxy: the proxy certificate location.
@type option: a string
@param option: None is the default value. Other options available are:
- timeleft
- actimeleft
- identity
- fqan
- all
@rtype: tuple
@return: status, output, error, pyerror.
"""
validOptions = ['actimeleft', 'timeleft', 'identity', 'fqan', 'all']
if option:
if option not in validOptions:
return S_ERROR( 'Non valid option %s' % option )
retVal = File.multiProxyArgument( proxy )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
# chain = proxyDict[ 'chain' ]
proxyLocation = proxyDict[ 'file' ]
if not Os.which("voms-proxy-info"):
return S_ERROR("Missing voms-proxy-info")
cmd = 'voms-proxy-info -file %s' % proxyLocation
if option:
cmd += ' -%s' % option
result = shellCall( self._secCmdTimeout, cmd )
if proxyDict[ 'tempFile' ]:
self._unlinkFiles( proxyLocation )
if not result['OK']:
return S_ERROR( 'Failed to call voms-proxy-info' )
status, output, error = result['Value']
# FIXME: if the local copy of the voms server certificate is not up to date the command returns 0.
# the stdout needs to be parsed.
if status:
gLogger.warn( 'Failed to execute:', cmd )
gLogger.warn( 'Exit code:', status )
gLogger.warn( 'StdOut' , output )
gLogger.warn( 'StdErr' , error )
if error.find( 'VOMS extension not found' ) == -1 and \
not error.find( 'WARNING: Unable to verify signature! Server certificate possibly not installed.' ) == 0:
return S_ERROR( 'Failed to get proxy info. Command: %s; StdOut: %s; StdErr: %s' % ( cmd, output, error ) )
if option == 'fqan':
if output:
output = output.split( '/Role' )[0]
else:
output = '/lhcb'
return S_OK( output )
def getVOMSESLocation( self ):
#755
requiredDirPerms = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
#644
requiredFilePerms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
#777
allPerms = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
vomsesPaths = []
if 'DIRAC_VOMSES' in os.environ:
vomsesPaths.append( os.environ[ 'DIRAC_VOMSES' ] )
vomsesPaths.append( os.path.join( rootPath, "etc", "grid-security", "vomses" ) )
for vomsesPath in vomsesPaths:
if not os.path.exists( vomsesPath ):
continue
if os.path.isfile( vomsesPath ):
pathMode = os.stat( vomsesPath )[ stat.ST_MODE ]
if ( ( pathMode & allPerms ) ^ requiredFilePerms ) == 0:
return vomsesPath
fd, tmpPath = tempfile.mkstemp( "vomses" )
os.close( fd )
shutil.copy( vomsesPath , tmpPath )
os.chmod( tmpPath, requiredFilePerms )
os.environ[ 'DIRAC_VOMSES' ] = tmpPath
return tmpPath
elif os.path.isdir( vomsesPath ):
ok = True
pathMode = os.stat( vomsesPath )[ stat.ST_MODE ]
if ( pathMode & allPerms ) ^ requiredDirPerms:
ok = False
if ok:
for fP in os.listdir( vomsesPath ):
pathMode = os.stat( os.path.join( vomsesPath, fP ) )[ stat.ST_MODE ]
if ( pathMode & allPerms ) ^ requiredFilePerms:
ok = False
break
if ok:
return vomsesPath
tmpDir = tempfile.mkdtemp()
tmpDir = os.path.join( tmpDir, "vomses" )
shutil.copytree( vomsesPath, tmpDir )
os.chmod( tmpDir, requiredDirPerms )
for fP in os.listdir( tmpDir ):
os.chmod( os.path.join( tmpDir, fP ), requiredFilePerms )
os.environ[ 'DIRAC_VOMSES' ] = tmpDir
return tmpDir
def setVOMSAttributes( self, proxy, attribute = None, vo = None ):
""" Sets voms attributes to a proxy
"""
if not vo:
return S_ERROR( "No vo specified, and can't get default in the configuration" )
retVal = File.multiProxyArgument( proxy )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
chain = proxyDict[ 'chain' ]
proxyLocation = proxyDict[ 'file' ]
secs = chain.getRemainingSecs()[ 'Value' ] - 300
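# A 5 minute (300 s) safety margin is subtracted, presumably so the VOMS-extended
# proxy cannot outlive the proxy it is derived from.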
if secs < 0:
return S_ERROR( "Proxy length is less that 300 secs" )
hours = int( secs / 3600 )
mins = int( ( secs - hours * 3600 ) / 60 )
retVal = self._generateTemporalFile()
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return retVal
newProxyLocation = retVal[ 'Value' ]
cmdArgs = []
if chain.isLimitedProxy()[ 'Value' ]:
cmdArgs.append( '-limited' )
cmdArgs.append( '-cert "%s"' % proxyLocation )
cmdArgs.append( '-key "%s"' % proxyLocation )
cmdArgs.append( '-out "%s"' % newProxyLocation )
if attribute and attribute != 'NoRole':
cmdArgs.append( '-voms "%s:%s"' % ( vo, attribute ) )
else:
cmdArgs.append( '-voms "%s"' % vo )
cmdArgs.append( '-valid "%s:%s"' % ( hours, mins ) )
tmpDir = False
vomsesPath = self.getVOMSESLocation()
if vomsesPath:
cmdArgs.append( '-vomses "%s"' % vomsesPath )
if not Os.which('voms-proxy-init'):
return S_ERROR("Missing voms-proxy-init")
cmd = 'voms-proxy-init %s' % " ".join( cmdArgs )
result = shellCall( self._secCmdTimeout, cmd )
if tmpDir:
shutil.rmtree( tmpDir )
File.deleteMultiProxy( proxyDict )
if not result['OK']:
self._unlinkFiles( newProxyLocation )
return S_ERROR( 'Failed to call voms-proxy-init: %s' % result['Message'] )
status, output, error = result['Value']
if status:
self._unlinkFiles( newProxyLocation )
return S_ERROR( 'Failed to set VOMS attributes. Command: %s; StdOut: %s; StdErr: %s' % ( cmd, output, error ) )
newChain = X509Chain()
retVal = newChain.loadProxyFromFile( newProxyLocation )
self._unlinkFiles( newProxyLocation )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't load new proxy: %s" % retVal[ 'Message' ] )
return S_OK( newChain )
def vomsInfoAvailable( self ):
"""
Is voms info available?
"""
if not Os.which("voms-proxy-info"):
return S_ERROR("Missing voms-proxy-info")
cmd = 'voms-proxy-info -h'
result = shellCall( self._secCmdTimeout, cmd )
if not result['OK']:
return False
status, output, error = result['Value']
if status:
return False
return True
|
marcelovilaca/DIRAC
|
Core/Security/VOMS.py
|
Python
|
gpl-3.0
| 12,587
|
[
"DIRAC"
] |
6161f79cee05ca8ec2c90cd91696a9297d4091adf58df48d7a4b7782499ba411
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Query-Parser.
# Copyright (C) 2014, 2016 CERN.
#
# Invenio-Query-Parser is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Query-Parser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Define abstract classes."""
class BinaryOp(object):
def __init__(self, left, right):
self.left = left
self.right = right
def accept(self, visitor):
return visitor.visit(self,
self.left.accept(visitor),
self.right.accept(visitor))
def __eq__(self, other):
return (
type(self) == type(other)
) and (
self.left == other.left
) and (
self.right == other.right
)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self.left), repr(self.right))
class UnaryOp(object):
def __init__(self, op):
self.op = op
def accept(self, visitor):
return visitor.visit(self, self.op.accept(visitor))
def __eq__(self, other):
return type(self) == type(other) and self.op == other.op
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.op))
class ListOp(object):
def __init__(self, children):
try:
iter(children)
except TypeError:
self.children = [children]
else:
self.children = children
def accept(self, visitor):
return visitor.visit(self, [c.accept(visitor) for c in self.children])
def __eq__(self, other):
return type(self) == type(other) and self.children == other.children
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.children))
class Leaf(object):
def __init__(self, value):
self.value = value
def accept(self, visitor):
return visitor.visit(self)
def __eq__(self, other):
return type(self) == type(other) and self.value == other.value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, repr(self.value))
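# Usage sketch (illustrative only): queries are built as small trees of these nodes,
# e.g. AndOp(KeywordOp(Keyword('title'), Value('quark')), NotOp(ValueQuery(Value('draft')))),
# and are walked with the visitor pattern via node.accept(visitor).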
# Concrete classes
class BinaryKeywordBase(BinaryOp):
@property
def keyword(self):
# FIXME evaluate if it's possible to move it out to spires module
from .contrib.spires.ast import SpiresOp
if self.left:
if isinstance(self.left, SpiresOp):
return self.left.keyword
elif isinstance(self.right, SpiresOp):
return self.right.keyword
return None
class AndOp(BinaryKeywordBase):
pass
class OrOp(BinaryKeywordBase):
pass
class NotOp(UnaryOp):
@property
def keyword(self):
return getattr(self.op, 'keyword')
class RangeOp(BinaryOp):
pass
class LowerOp(UnaryOp):
pass
class LowerEqualOp(UnaryOp):
pass
class GreaterOp(UnaryOp):
pass
class GreaterEqualOp(UnaryOp):
pass
class KeywordOp(BinaryOp):
pass
class NestedKeywordsRule(BinaryOp):
pass
class ValueQuery(UnaryOp):
pass
class Keyword(Leaf):
pass
class Value(Leaf):
pass
class SingleQuotedValue(Leaf):
pass
class DoubleQuotedValue(Leaf):
pass
class RegexValue(Leaf):
pass
class EmptyQuery(Leaf):
pass
|
tiborsimko/invenio-query-parser
|
invenio_query_parser/ast.py
|
Python
|
gpl-2.0
| 4,058
|
[
"VisIt"
] |
7aa762e805e5c05eb3bbde0f77a1d211eec8c05c9229273b6efb5016520a9a47
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
from otsvm import *
# instantiate a kernel=Gaussian with sigma = 2
kernel = ExponentialRBF(2)
x = Point(2, 2.0)
y = Point(2, 1.0)
print(' kernel ([2 2],[1 1]) = %.12g' % kernel(x, y))
print(' dkernel/dx_i([2 2],[1 1]) = ', repr(kernel.partialGradient(x, y)))
print(' d2kernel/(dx_i*dx_j)([2 2],[1 1]) = ', repr(
kernel.partialHessian(x, y)))
x[0] = 0.0
x[1] = 5.0
y[0] = 0.0
y[1] = 3.0
print(' kernel ([0 5],[0 3]) = %.12g' % kernel(x, y))
print(' dkernel/dx_i([0 5],[0 3]) = ', repr(kernel.partialGradient(x, y)))
print(' d2kernel/(dx_i*dx_j)([0 5],[0 3]) = ', repr(
kernel.partialHessian(x, y)))
|
openturns/otsvm
|
python/test/t_ExponentialRBF_std.py
|
Python
|
lgpl-3.0
| 696
|
[
"Gaussian"
] |
21ece5697b48ab21176570cf8fabc85746acfdc6aeb4c62caed223c2c41140c3
|
#!/usr/bin/env python
# this script writes out WB to Repbase blast matches and takes those with no hit to repbase and blasts them against one another
# USE:collapse_WB_REPB.py <blast_comparison_file>
# example:/lscr2/andersenlab/kml436/git_repos2/Transposons/files$ python ../scripts/collapse_WB_REPB.py WB_REPB_blast_family_comparison.txt
import sys
import re
import os
from subprocess import Popen, PIPE
blast_compare_file = sys.argv[1]
BLAST_COMPARE_FILE = open(blast_compare_file, "r")
INT1 = open("intermediate_family_matches_e0.txt", "w")
INT2 = open("intermediate_e0.txt", "w")
INT1.write("WB_ID\tWB_family\tREPB_family\tevalue\n")
INT2.write("WB_ID\tWB_family\tREPB_family\tevalue\n")
INT3 = open("intermediate_non_family_match.txt", "w")
INT3.write("WB_TE\tWB_Family\tBlast_Hit\tPercent_Identity\tAlng_Length\tMismatches\tGap_Open\tQuery_Start\tQuery_End\tSubject_Start\tSubject_End\tE_Value\tBit_Score\n")
repb_te_families ={}
wb_te_families ={}
wb_no_match={}
wb_no_match_familes={}
firstline = True
IDs_in_blast={}
for line in BLAST_COMPARE_FILE:
if firstline: #skip first line
firstline = False
continue
items= re.split("[\t]",line)
WB_ID = items[0]
WB_family = items[1]
REPB_family = items[2]
evalue = float(items[11])
#write out blast hits where the best-hit Repbase family matched the Wormbase family and the evalue was zero
if WB_family == REPB_family and evalue == 0.0: ##AND IF EVALUSE IS ZERO
print "YES"
print line
repb_te_families[REPB_family] = 0
wb_te_families[WB_family] = 0
INT1.write("{WB_ID}\t{WB_family}\t{REPB_family}\t{evalue}\n".format(**locals()))
#write out blast hits where the families did not match but the evalue was zero (cases where a Wormbase family is represented by a Repbase family with a different name)
elif evalue == 0.0:
print "evalue is {evalue}".format(**locals())
print line
repb_te_families[REPB_family] = 0
wb_te_families[WB_family] = 0
INT2.write("{WB_ID}\t{WB_family}\t{REPB_family}\t{evalue}\n".format(**locals()))
#add to dictionaries all hits that did not match the above 2 conditions
else:
wb_no_match_familes[WB_ID]= WB_family
wb_no_match[WB_ID]=line
#for each key in dictionary, ignore those that have a family already represented in repbase or whose family matches a wormbase family already represented
final_no_matches={}
for key in wb_no_match_familes.keys():
family = wb_no_match_familes[key]
if family in repb_te_families.keys() or family in wb_te_families.keys():
print "Family Already Represented"
#write all other blast hits not matching the above condition to a new file and a new dictionary
else:
value = wb_no_match[key]
INT3.write(value)
final_no_matches[key] = 0
#write the WB transposon fastas with no matches to a new file
NO_MATCH = open("no_match.fasta", "w")
from Bio import SeqIO
fasta_sequences = SeqIO.parse(open("WB_all_seqs.fasta"),'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
#name, sequence = fasta.id, fasta.seq.tostring()
if name in final_no_matches.keys():
NO_MATCH.write(">" + name + "\n" + sequence + "\n")
NO_MATCH.close()
#make a blast database of these sequences and blast them against one another
#os.system("/lscr2/andersenlab/kml436/ncbi-blast-2.2.30+/bin/makeblastdb -in no_match.fasta -dbtype nucl -out no_match_database")
result, err = Popen(['/lscr2/andersenlab/kml436/ncbi-blast-2.2.30+/bin/makeblastdb -in no_match.fasta -dbtype nucl -out no_match_database'], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(['/lscr2/andersenlab/kml436/ncbi-blast-2.2.30+/bin/blastn -db no_match_database -query no_match.fasta -evalue 1 -outfmt 6 -max_target_seqs 10 -out no_match_blast.txt -num_threads 22'], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#blast_fields= '\t'.join(items[1:12])
#print blast_fields
#IDs_in_blast[transposon_ID] = blast_fields
|
klaricch/Transposons2
|
scripts/collapse_WB_REPB.py
|
Python
|
mit
| 3,872
|
[
"BLAST"
] |
4b59981e378b4d54ce31125e4c0481b219730d62f7c0be64b2fa6e2197cbd2c2
|
"""
@created_at 2014-07-17
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
Based on the work of Juan Bekios-Calfa <juan.bekios@ucn.cl>
"""
# The following standards are recommended:
# 1. For coding: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import pylab
import numpy as np
from graph_exception import GraphException
class Graph:
"""Esta clase implementa los graficos solicitados en el taller
para los resultados del clasificador.
"""
def __init__(self, original_data, data_classes):
"""Este constructor carga los datos requeridis por la clase Graph.
:param original_data: Conjunto de valores a graficar.
:param data_classes: Conjunto de valores de clases asociados a cada elemento del conjunto a graficar.
"""
# Load the original data as well as the subsets belonging to each class.
self._data = original_data
self._data_positive = original_data[data_classes == 1]
self._data_negative = original_data[data_classes == 0]
# Initialize as empty the variables required by the probability plot.
self._n, self._bins, self._patches = (None,)*3
def frequencies_histogram(self):
"""Este metodo construye un histograma de frecuencias en base los datos cargados.
"""
# Frequency plot of the LDA set per class
# http://matplotlib.org/examples/pylab_examples/histogram_demo_extended.html
pylab.figure()
self._n, self._bins, self._patches = pylab.hist([self._data_positive, self._data_negative], 30, histtype='bar',
color=['red', 'blue'],
label=['$\mathit{positivo}$', '$\mathit{negativo}$'])
pylab.legend()
pylab.xlabel('Caracteristicas LDA')
pylab.ylabel('Frecuencia')
pylab.title('Histograma de frecuencias LDA: Conjunto de entrenamiento')
def probability_density_functions(self):
"""Este metodo construye el grafico de las funciones de densidad de probabilidad
en base los datos cargados.
"""
if self._bins is None:
raise GraphException("Debe calcular el histograma primero.")
# --- Probabilities
pylab.figure()
#print bins
#print n
#prob_positive = n[0]/np.sum(n[0])
#prob_negative = n[1]/np.sum(n[1])
#pylab.bar(left=bins[:-1], height=prob_positive, width=0.1, bottom=None, hold=None, color='red', alpha=0.5)
#pylab.bar(left=bins[:-1], height=prob_negative, width=0.1, bottom=None, hold=None, color='blue', alpha=0.5)
# add a 'best fit' line
mu_positive = np.mean(self._data_positive)
sigma_positive = np.std(self._data_positive)
y_positive = pylab.mlab.normpdf(self._bins, mu_positive, sigma_positive)
# Another normal distribution...
mu_negative = np.mean(self._data_negative)
sigma_negative = np.std(self._data_negative)
y_negative = pylab.mlab.normpdf(self._bins, mu_negative, sigma_negative)
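# mlab.normpdf(x, mu, sigma) evaluates a Gaussian density at the histogram bin edges;
# note that it was removed in matplotlib 3.x, where scipy.stats.norm.pdf(x, mu, sigma)
# is an equivalent replacement.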
# Classes...
pylab.plot(self._bins, y_positive, 'r--', label='$\mathcal{P}(LDA|D^+)$')
pylab.plot(self._bins, y_negative, 'b--', label='$\mathcal{P}(LDA|D^-)$')
# Labels...
pylab.xlabel('$LDA$')
pylab.ylabel('$P(LDA|DIABETES)$')
pylab.legend()
def conditional_probability(self, x, y):
"""Este metodo construye el grafico de las funciones de probabilidad usando Bayes.
"""
# --- Probabilities
pylab.figure()
# Classes...
pylab.scatter(x, y[:,0], color='blue', label='$\mathcal{P}(D^-|LDA)$')
pylab.scatter(x, y[:,1], color='red', label='$\mathcal{P}(D^+|LDA)$')
# Labels...
pylab.xlabel('$LDA$')
pylab.ylabel('$P(DIABETES|LDA)$')
pylab.legend()
pylab.grid()
def show_graphs(self):
"""Despliega los graficos
"""
pylab.show()
|
efulet/pca
|
pca/lib/graph.py
|
Python
|
mit
| 4,256
|
[
"Brian"
] |
b5991a93cc46e26b66ac3da6ff7e207a08544ee89c6ad50858c9fb56f9c2a08e
|
# Copyright (C) 2016 Lewis, Peloton
########################################################################
# Python script to compute CMB weak lensing biases (N0, N1)
# and derivatives. Internal computations are done in Fortran.
# Authors: (Fortran) Antony Lewis, (Python, and f2py) Julien Peloton
# Contact: j.peloton@sussex.ac.uk
########################################################################
from LensingBiases_f import lensingbiases as lensingbiases_f
from LensingBiases_f import checkproc as checkproc_f
import os
import glob
import matplotlib
matplotlib.use("Agg")
import numpy as np
import pylab as pl
pl.ioff()
import argparse
def addargs(parser):
''' Parse command line arguments '''
parser.add_argument(
'-phifile',
dest='phifile',
help='CAMB file containing the fiducial lensing potential',
required=True)
parser.add_argument(
'-lensedcmbfile',
dest='lensedcmbfile',
help='CAMB file containing the fiducial lensed spectra',
required=True)
parser.add_argument(
'-FWHM',
dest='FWHM',
help='Beam width (FWHM) in arcmin',
required=True,
type=float)
parser.add_argument(
'-noise_level',
dest='noise_level',
help='Temperature noise level (uk.arcmin). Polar is sqrt(2) bigger.',
required=True,
type=float)
parser.add_argument(
'-lmin',
dest='lmin',
help='Minimum multipole',
default=2)
parser.add_argument(
'-lmaxout',
dest='lmaxout',
help='Maximum multipole for the output',
default=500)
parser.add_argument(
'-lmax',
dest='lmax',
help='Maximum multipole for the computation',
default=500)
parser.add_argument(
'-lmax_TT',
dest='lmax_TT',
help='Maximum multipole for temperature',
default=500)
parser.add_argument(
'-lcorr_TT',
dest='lcorr_TT',
help='Cut-off in ell to simulate correlated noise at low-ell. \
Not used by default. Adds (1+(lcorr_TT/ell)**4) otherwise',
default=0)
parser.add_argument(
'-tmp_output',
dest='tmp_output',
help='Output folder, where files will be written',
default='./toto')
def grabargs(args_param=None):
''' Parse command line arguments '''
parser = argparse.ArgumentParser(description='Package to compute N0 and N1 lensing biases.')
addargs(parser)
args = parser.parse_args(args_param)
return args
def checkproc_py():
'''
Routine to check the number of processors involved
in the computation (Fortran routines use openmp).
'''
nproc = checkproc_f.get_threads()
if nproc > 1:
print 'You are using ', nproc, ' processors'
else:
print '###################################'
print 'You are using ', nproc, ' processor'
print 'If you want to speed up the computation,'
print 'set up correctly your number of task.'
print 'e.g in bash, if you want to use n procs,'
print 'add this line to your bashrc:'
print 'export OMP_NUM_THREADS=n'
print '###################################'
def compute_n0_py(
from_args=None,
phifile=None,
lensedcmbfile=None,
FWHM=None,
noise_level=None,
lmin=None,
lmaxout=None,
lmax=None,
lmax_TT=None,
lcorr_TT=None,
tmp_output=None):
"""
Routine to compute the N0 Gaussian bias.
It calls internally the Fortran routine for speed-up.
Input:
* from_args: class, contains all arguments for the routine (see addargs).
If specified, you do not have to specify the other arguments.
* phifile: string, path to the file containing the fiducial lensing potential
* lensedcmbfile: string, path to the file containing the fiducial lensed CMB spectra
* FWHM: float, beam width in arcmin
* noise_level: float, Temperature noise level (uk.arcmin). Polar is sqrt(2) bigger.
* lmin: int, Minimum multipole
* lmaxout: int, Maximum multipole for the output
* lmax: int, Maximum multipole for the computation
* lmax_TT: int, Maximum multipole for temperature
* lcorr_TT: int, Cut-off in ell for correlated noise (put zero if not wanted)
* tmp_output: string: Output folder, where files will be written
Output:
* return bins, lensing potential, matrix containing
all N0s, and names of spectra ordered.
"""
if from_args is not None:
lensingbiases_f.compute_n0(
from_args.phifile,
from_args.lensedcmbfile,
from_args.FWHM/60.,
from_args.noise_level,
from_args.lmin,
from_args.lmaxout,
from_args.lmax,
from_args.lmax_TT,
from_args.lcorr_TT,
from_args.tmp_output)
n0 = np.loadtxt(os.path.join(from_args.tmp_output, 'N0_analytical.dat')).T
else:
lensingbiases_f.compute_n0(
phifile,
lensedcmbfile,
FWHM/60.,
noise_level,
lmin,
lmaxout,
lmax,
lmax_TT,
lcorr_TT,
tmp_output)
n0 = np.loadtxt(os.path.join(tmp_output, 'N0_analytical.dat')).T
indices = ['TT', 'EE', 'EB', 'TE', 'TB', 'BB']
bins = n0[0]
phiphi = n0[1]
n0_mat = np.reshape(n0[2:], (len(indices), len(indices), len(bins)))
return bins, phiphi, n0_mat, indices
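# n0_mat is indexed as n0_mat[i][j][ell_bin] with i, j running over `indices`
# ('TT', 'EE', ...), so e.g. n0_mat[0][0] is the TTxTT N0 curve.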
def compute_n1_py(
from_args=None,
phifile=None,
lensedcmbfile=None,
FWHM=None,
noise_level=None,
lmin=None,
lmaxout=None,
lmax=None,
lmax_TT=None,
lcorr_TT=None,
tmp_output=None):
"""
Routine to compute the N1 bias.
It calls internally the Fortran routine for speed-up.
Input:
* from_args: class, contains all arguments for the routine (see addargs).
If specified, you do not have to specify the other arguments.
* phifile: string, path to the file containing the fiducial lensing potential
* lensedcmbfile: string, path to the file containing the fiducial lensed CMB spectra
* FWHM: float, beam width in arcmin
* noise_level: float, Temperature noise level (uk.arcmin). Polar is sqrt(2) bigger.
* lmin: int, Minimum multipole
* lmaxout: int, Maximum multipole for the output
* lmax: int, Maximum multipole for the computation
* lmax_TT: int, Maximum multipole for temperature
* lcorr_TT: int, Cut-off in ell for correlated noise (put zero if not wanted)
* tmp_output: string: Output folder, where files will be written
Output:
* return bins, lensing potential, matrix containing
all N1s, and names of spectra ordered.
"""
if from_args is not None:
lensingbiases_f.compute_n1(
from_args.phifile,
from_args.lensedcmbfile,
from_args.FWHM/60.,
from_args.noise_level,
from_args.lmin,
from_args.lmaxout,
from_args.lmax,
from_args.lmax_TT,
from_args.lcorr_TT,
from_args.tmp_output)
n1 = np.loadtxt(os.path.join(from_args.tmp_output, 'N1_All_analytical.dat')).T
else:
lensingbiases_f.compute_n1(
phifile,lensedcmbfile,
FWHM/60.,
noise_level,
lmin,
lmaxout,
lmax,
lmax_TT,
lcorr_TT,
tmp_output)
n1 = np.loadtxt(os.path.join(tmp_output, 'N1_All_analytical.dat')).T
indices = ['TT', 'EE', 'EB', 'TE', 'TB', 'BB']
bins = n1[0]
n1_mat = np.reshape(n1[1:], (len(indices), len(indices), len(bins)))
return bins, n1_mat, indices
def compute_n1_derivatives_py(
from_args=None,
phifile=None,
lensedcmbfile=None,
FWHM=None,
noise_level=None,
lmin=None,
lmaxout=None,
lmax=None,
lmax_TT=None,
lcorr_TT=None,
tmp_output=None):
"""
Routine to compute the derivatives of N1 bias wrt lensing potential power-spectrum.
It calls internally the Fortran routine for speed-up.
Input:
* from_args: class, contains all arguments for the routine (see addargs).
If specified, you do not have to specify the other arguments.
* phifile: string, path to the file containing the fiducial lensing potential
* lensedcmbfile: string, path to the file containing the fiducial lensed CMB spectra
* FWHM: float, beam width in arcmin
* noise_level: float, Temperature noise level (uk.arcmin). Polar is sqrt(2) bigger.
* lmin: int, Minimum multipole
* lmaxout: int, Maximum multipole for the output
* lmax: int, Maximum multipole for the computation
* lmax_TT: int, Maximum multipole for temperature
* lcorr_TT: int, Cut-off in ell for correlated noise (put zero if not wanted)
* tmp_output: string: Output folder, where files will be written
Output:
* nothing is returned directly; the N1 derivatives are written to files in
tmp_output (loading and returning them is currently commented out below).
"""
if from_args is not None:
lensingbiases_f.compute_n1_derivatives(
from_args.phifile,
from_args.lensedcmbfile,
from_args.FWHM/60.,
from_args.noise_level,
from_args.lmin,
from_args.lmaxout,
from_args.lmax,
from_args.lmax_TT,
from_args.lcorr_TT,
from_args.tmp_output)
# n1 = np.loadtxt(os.path.join(args.tmp_output,'N1_All_analytical.dat')).T
else:
lensingbiases_f.compute_n1_derivatives(
phifile,
lensedcmbfile,
FWHM/60.,
noise_level,
lmin,
lmaxout,
lmax,
lmax_TT,
lcorr_TT,
tmp_output)
# n1 = np.loadtxt(os.path.join(tmp_output,'N1_All_analytical.dat')).T
# indices = ['TT','EE','EB','TE','TB','BB']
# bins = n1[0]
# n1_mat = np.reshape(n1[1:],(len(indices),len(indices),len(bins)))
# return bins, n1_mat, indices
def minimum_variance_n0(N0_array, N0_names, checkit=False):
'''
Compute the variance of the minimum variance estimator and the associated weights.
Input:
* N0_array: ndarray, contain the N0s to combine
* N0_names: ndarray of string, contain the name of the N0s to combine (['TTTT', 'EEEE', etc.])
Output:
* minimum_variance_n0: 1D array, the MV N0
* weights*minimum_variance_n0: ndarray, the weights for each spectrum (TT, EE, etc.)
* N0_names_ordered: 1D array, contain the name of the spectra (TT, EE, etc.)
'''
N0_array = np.reshape(N0_array, (len(N0_array)**2, len(N0_array[0][0])))
N0_names_full = ['%s%s'%(i, j) for i in N0_names for j in N0_names]
## Fill matrix
sub_vec = [[name, pos] for pos, name in enumerate(N0_names)]
dic_mat = {'%s%s'%(XY, ZW):[i, j] for XY, i in sub_vec for ZW, j in sub_vec}
## Build the inverse matrix for each ell
def build_inv_submatrix(vector_ordered, names_ordered, dic_mat, nsub_element):
mat = np.zeros((nsub_element, nsub_element))
for pos, name in enumerate(names_ordered):
mat[dic_mat[name][0]][dic_mat[name][1]] = mat[dic_mat[name][1]][dic_mat[name][0]] = vector_ordered[pos]
return np.linalg.pinv(mat)
inv_submat_array = np.array([
build_inv_submatrix(
vector_ordered,
N0_names_full,
dic_mat,
len(N0_names)) for vector_ordered in np.transpose(N0_array)])
inv_N0_array = np.array([ np.sum(submat) for submat in inv_submat_array ])
minimum_variance_n0 = 1. / inv_N0_array
weights = np.array([[np.sum(submat[i]) for submat in inv_submat_array] for i in range(len(sub_vec))])
if checkit:
print 'Sum of weights = ', np.sum(weights * minimum_variance_n0) / len(minimum_variance_n0)
print 'Is sum of weights 1? ',np.sum(weights * minimum_variance_n0) / len(minimum_variance_n0) == 1.0
return minimum_variance_n0, weights * minimum_variance_n0
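# In formulas, the return values are N0_MV(L) = 1 / sum_ij [N0(L)^-1]_ij and the weights
# w_i(L) = N0_MV(L) * sum_j [N0(L)^-1]_ij, which satisfy sum_i w_i = 1 (verified above
# when checkit=True).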
def minimum_variance_n1(bins, N1_array, weights_for_MV, spectra_names, bin_function=None):
'''
Takes all N1 and forms the minimum variance estimator.
Assumes N1 structure is coming from Biases_n1mat.f90
Input:
* N1: ndarray, contain the N1 (output of Biases_n1mat.f90)
* weights_for_MV: ndarray, contain the weights used for MV
* spectra_names: ndarray of string, contain the name of the spectra ordered
'''
## Ordering: i_TT=0,i_EE=1,i_EB=2,i_TE=3,i_TB=4, i_BB=5 (from Frotran)
names_N1 = ['%s%s'%(i, j) for i in spectra_names for j in spectra_names]
if bin_function is not None:
n1_tot = np.zeros_like(bin_centers)
else:
n1_tot = np.zeros_like(weights_for_MV[0])
for estimator_name in names_N1:
## Indices for arrays
index_x = spectra_names.index(estimator_name[0:2])
index_y = spectra_names.index(estimator_name[2:])
## Interpolate N1 if necessary
n1_not_interp = N1_array[index_x][index_y]
if bin_function is not None:
n1_interp = np.interp(bin_centers, bins, n1_not_interp)
else:
n1_interp = n1_not_interp
## Weights
wXY_index = spectra_names.index(estimator_name[0:2])
wZW_index = spectra_names.index(estimator_name[2:4])
## Update N1
if bin_function is not None:
n1_tot += bin_function(weights_for_MV[wXY_index]) * bin_function(weights_for_MV[wZW_index]) * n1_interp
else:
n1_tot += weights_for_MV[wXY_index] * weights_for_MV[wZW_index] * n1_interp
return n1_tot
def plot_biases(bins, phiphi, MV_n0, MV_n1=None, N0_array=None, N1_array=None):
'''
Quick plot for inspection
'''
tphi = lambda l: (l + 0.5)**4 / (2. * np.pi) # scaling to apply to cl_phiphi when plotting.
colors = lambda i: matplotlib.cm.jet(i * 60)
## Plot lensing
pl.loglog(bins, phiphi, color='grey', label='Lensing')
## Plot N0
pl.loglog(bins, MV_n0 * tphi(bins), color='black', lw=2, label='N0 bias')
if N0_array is not None:
indices = ['TT','EE','EB','TE','TB','BB']
for i in range(len(N0_array)):
pl.loglog(
bins,
N0_array[i][i][:] * tphi(bins),
color=colors(i),
lw=2,
alpha=0.2,
label=indices[i]+indices[i])
## Plot N1
if MV_n1 is not None:
pl.loglog(bins, MV_n1 * tphi(bins), color='black', lw=2, ls='--', label='N1 bias')
if N1_array is not None:
indices = ['TT','EE','EB','TE','TB','BB']
for i in range(len(N1_array)):
pl.loglog(
bins,
N1_array[i][i][:] * tphi(bins),
color=colors(i),
ls='--',
lw=2,
alpha=0.2,
label=indices[i]+indices[i])
pl.xlabel('$\ell$', fontsize=20)
pl.ylabel(r"$[\ell(\ell+1)]^2/(2\pi)C_\ell^{\phi^{XY} \phi^{ZW}}$", fontsize=20)
leg=pl.legend(loc='best', ncol=2, fontsize=12.5)
leg.get_frame().set_alpha(0.0)
pl.savefig('Biases.pdf')
pl.clf()
if __name__ == "__main__":
args_param = None
args = grabargs(args_param)
## Check openmp
checkproc_py()
## Compute N0s, and form MV
## Example with argparse
bins, phiphi, n0_mat, indices = compute_n0_py(from_args=args)
## Example with direct arguments
# bins, phiphi, n0_mat, indices = compute_n0_py(from_args=None,phifile=args.phifile,lensedcmbfile=args.lensedcmbfile,
# FWHM=args.FWHM,noise_level=args.noise_level,
# lmin=args.lmin,lmaxout=args.lmaxout,lmax=args.lmax,lmax_TT=args.lmax_TT,
# tmp_output=args.tmp_output)
MV_n0, weights = minimum_variance_n0(n0_mat, indices, checkit=False)
## Compute N1s, and form MV
bins, n1_mat, indices = compute_n1_py(from_args=args)
MV_n1 = minimum_variance_n1(bins, n1_mat, weights, indices, bin_function=None)
## Compute derivatives of N1s (also compute N1)
compute_n1_derivatives_py(from_args=args)
plot_biases(bins, phiphi, MV_n0, MV_n1=MV_n1, N0_array=n0_mat, N1_array=n1_mat)
|
JulienPeloton/lensingbiases
|
LensingBiases.py
|
Python
|
gpl-3.0
| 14,283
|
[
"Gaussian"
] |
6c69230c7552ddc01cec99a636704bf277e1791f219b680441bbe5f421eba8b9
|
# modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
# NO plotting - just saving the results: LOG-response spectra for each sigma and max-LOG response numerical spectra
# pre-convolved with a gaussian filter of sigma=10
import os, shutil
import time, datetime
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms
dbz = pattern.DBZ
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime] #fix
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
march2014wrf = ob.march2014wrf
march2014wrf.fix()
################################################################################
# hack
#kongrey.list = [v for v in kongrey.list if v.dataTime>="20130828.2320"]
################################################################################
# parameters
testName = "modifiedMexicanHatTest15_march2014wrf"
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
dbzstreams = [march2014wrf]
sigmaPower=0
scaleSpacePower=0 #2014-05-14
testScriptsFolder = dp.root + 'python/armor/tests/'
sigmaPreprocessing = 10 # sigma for preprocessing, 2014-05-15
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
(time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
# end parameters
################################################################################
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in dbzstreams:
summaryFile.write("\n===============================================================\n\n\n")
streamMean = 0.
dbzCount = 0
#hack
#streamMean = np.array([135992.57472004235, 47133.59049120619, 16685.039217734946, 11814.043851969862, 5621.567482638702, 3943.2774923729303, 1920.246102887001, 1399.7855335686243, 760.055614122099, 575.3654495432361, 322.26668666562375, 243.49842951291757, 120.54647935045809, 79.05741086463254, 26.38971066782135])
#dbzCount = 140
for a in ds:
print "-------------------------------------------------"
print testName
print
print a.name
a.load()
a.setThreshold(0)
a.saveImage(imagePath=outputFolder+a.name+".png")
L = []
a.responseImages = [] #2014-05-02
#for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
for sigma in sigmas:
print "sigma:", sigma
a.load()
a.setThreshold(0)
arr0 = a.matrix
#####################################################################
arr0 = ndimage.filters.gaussian_filter(arr0, sigma=sigmaPreprocessing) # <-- 2014-05-15
#####################################################################
#arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) #2014-05-07
#arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**scaleSpacePower #2014-05-14
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
#a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
###############################################################################
# computing the spectrum, i.e. sigma for which the LOG has max response
# 2014-05-02
a.responseImages.append({'sigma' : sigma,
'matrix' : arr1 * sigma**2,
})
pickle.dump(a.responseImages, open(outputFolder+a.name+"responseImagesList.pydump",'w'))
a_LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
imagePath=outputFolder+a1.name+"_LOGspec.png",
outputPath = outputFolder+a1.name+"_LOGspec.dat",
cmap = 'jet',
)
a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
#print 'shape:', a.responseImages.shape #debug
a.responseMax = a.responseImages.max(axis=2) # the deepest dimension
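# responseMax holds, per pixel, the strongest LoG response over all sigmas; comparing
# each per-sigma response against it below recovers the sigma at which that maximum
# occurs (scale selection in the spirit of Lindeberg's scale-space theory).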
a_LOGspec.matrix = np.zeros(a.matrix.shape)
for count, sigma in enumerate(sigmas):
a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
a_LOGspec.vmin = a_LOGspec.matrix.min()
a_LOGspec.vmax = a_LOGspec.matrix.max()
print "saving to:", a_LOGspec.imagePath
#a_LOGspec.saveImage()
print a_LOGspec.outputPath
#a_LOGspec.saveMatrix()
#a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png")
pickle.dump(a_LOGspec, open(outputFolder+ a_LOGspec.name + ".pydump","w"))
# end computing the sigma for which the LOG has max response
# 2014-05-02
##############################################################################
#pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w')) # no need to dump if test is easy
sigmas = np.array([v['sigma'] for v in L])
y1 = [v['abssum1'] for v in L]
plt.close()
plt.plot(sigmas,y1)
plt.title(a1.name+ '\n absolute values against sigma')
plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
plt.close()
# now update the mean
streamMeanUpdate = np.array([v['abssum1'] for v in L])
dbzCount += 1
streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
print "Stream Count and Mean so far:", dbzCount, streamMean
# now save the mean and the plot
summaryText = '\n---------------------------------------\n'
summaryText += str(int(time.time())) + '\n'
summaryText += "dbzStream Name: " + ds.name + '\n'
summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
summaryText +="sigma=\t\t" + str(sigmas.tolist()) + '\n'
summaryText += "streamMean=\t" + str(streamMean.tolist()) +'\n'
print summaryText
print "saving..."
# release the memory
a.matrix = np.array([0])
summaryFile.write(summaryText)
plt.close()
plt.plot(sigmas, streamMean* (sigmas**sigmaPower))
plt.title(ds.name + '- average laplacian-of-gaussian numerical spectrum\n' +\
'for ' +str(dbzCount) + ' DBZ patterns\n' +\
'suppressed by a factor of sigma^' + str(sigmaPower) )
plt.savefig(outputFolder + ds.name + "_average_LoG_numerical_spectrum.png")
plt.close()
summaryFile.close()
|
yaukwankiu/armor
|
tests/modifiedMexicanHatTest15_march2014wrf.py
|
Python
|
cc0-1.0
| 7,794
|
[
"Gaussian"
] |
d550095ca62f3252c1980b4bc6894b32cd143081b1845701f717fcfc728c383f
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Read all timesteps of the halos point files (vtu), each of which has a
### few halo points, and also read the resampled magnitude image data files
### (vti).
###
### For the few halo points, we create a glyph filter, scaling the glyph size
### and coloring by the magnitude density. For the image data files, we perform
### some volume rendering, where we update the scalar opacity function for
### time step and "volume index". Then in the application, the transfer function
### can be edited by moving the slider associated with this volume index. This
### script generates tent (hat) transfer functions.
###
### This script generates a parametric-image-stack with all combinations of
### timestep, halos on/off and volume on/off, and volume index.
###
### Input Files:
###
### 1) DataExploration/Output/Cosmology/data/run-1/halos-%d.vtu
### 2) DataExploration/Output/Cosmology/data/run-1/magnitude-%d.vti
###
### Output Files:
###
### A cinema dataset into: DataExploration/Output/Cosmology/halo_mag_time_linear
###
import sys, os
from paraview.simple import *
from cinema_utilities import *
from paraview import data_exploration as wx
# Need this import in order to directly rescale transfer functions to data range
from vtkPVServerManagerRenderingPython import *
#import matplotlib.pyplot as plt
# -----------------------------------------------------------------------------
# Path to input/output data/directories
# -----------------------------------------------------------------------------
#path_root = '/Volumes/OLeary'
path_root = '/media/scott/CINEMA FAT'
data_base_path = os.path.join(path_root, 'DataExploration/Data/Cosmology/data/run-1')
halos_pattern = os.path.join(data_base_path, 'halos-%d.vtu')
magnitude_pattern = os.path.join(data_base_path, 'magnitude-%d.vti')
file_times = range(0, 451, 50)
halos_filenames = [ (halos_pattern % time) for time in file_times]
magnitude_filenames = [ (magnitude_pattern % time) for time in file_times]
resolution = 500
output_working_dir = os.path.join(path_root, 'DataExploration/Output/Cosmology/mag_halos_tent')
# -----------------------------------------------------------------------------
# Helper methods
# -----------------------------------------------------------------------------
def buildLookupTables(luts):
for key in luts:
dataRange = luts[key]["range"]
if key == 'SplatterValues':
luts[key]["lut"] = GetLookupTableForArray( key,
1,
RGBPoints = [0.0, 0.368627, 0.309804, 0.635294, 90.00072000576004, 0.196078, 0.533333, 0.741176, 180.0014400115201, 0.4, 0.760784, 0.647059, 270.0021600172801, 0.670588, 0.866667, 0.643137, 360.0028800230402, 0.901961, 0.960784, 0.596078, 450.00360002880024, 1.0, 1.0, 0.74902, 540.0043200345602, 0.996078, 0.878431, 0.545098, 630.0050400403203, 0.992157, 0.682353, 0.380392, 720.0057600460804, 0.956863, 0.427451, 0.262745, 810.0064800518405, 0.835294, 0.243137, 0.309804, 900.0, 0.619608, 0.00392157, 0.258824],
NanColor = [0.500008, 0.0, 0.0],
ColorSpace='RGB',
ScalarRangeInitialized=1.0,
LockScalarRange=1)
else:
luts[key]["lut"] = GetLookupTableForArray( key, 1, RGBPoints=[dataRange[0], 0.231373, 0.298039, 0.752941, (dataRange[0]+dataRange[1])/2, 0.865003, 0.865003, 0.865003, dataRange[1], 0.705882, 0.0156863, 0.14902], VectorMode='Magnitude', NanColor=[0.0, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, LockScalarRange=1)
#def createHatFunctions():
# baseWidth = 0.4
# spacing = baseWidth / 2.0
#
# halfWidth = baseWidth / 2.0
# numberCenters = 4.0 / baseWidth
# centers = [ (baseWidth / 2.0) + (i * baseWidth / 4.0) for i in range(int(numberCenters - 3)) ]
#
# hatFunctions = []
#
# for c in centers:
# startPoint = c - halfWidth
#
# xPoints = [ 0.0, startPoint, startPoint + spacing, startPoint + (2 * spacing), 1.0 ]
# yPoints = [ 0.0, 0.0, 1.0, 0.0, 0.0 ]
#
# hatFunctions.append([xPoints, yPoints])
#
# #plt.plot(xPoints, yPoints, marker='o')
#
# #plt.show()
# return hatFunctions
def createHatFunctions():
baseWidth = 0.1
spacing = baseWidth / 2.0
halfWidth = baseWidth / 2.0
numberCenters = 1.0 / baseWidth
centers = [ (baseWidth / 2.0) + (i * baseWidth) for i in range(int(numberCenters)) ]
hatFunctions = []
for c in centers:
startPoint = c - halfWidth
xPoints = [ 0.0, startPoint, startPoint + spacing, startPoint + (2 * spacing), 1.0 ]
yPoints = [ 0.0, 0.0, 1.0, 0.0, 0.0 ]
hatFunctions.append([xPoints, yPoints])
#plt.plot(xPoints, yPoints, marker='o')
#plt.show()
return hatFunctions
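# With baseWidth = 0.1 this yields 10 tent (hat) functions over the normalized [0, 1]
# range, centered at 0.05, 0.15, ..., 0.95; each one is later stretched to the data
# range to build a piecewise-linear opacity function per "volume index".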
# -----------------------------------------------------------------------------
# Rendering configuration
# -----------------------------------------------------------------------------
view_size = [resolution, resolution]
#angle_steps = [15, 15]
angle_steps = [90, 90]
distance = 470
rotation_axis = [0.0, 1.0, 0.0]
center_of_rotation = [63.999996185675, 63.99996185355, 64.000034331975]
view = GetRenderView()
view.ViewSize = view_size
view.Background = [1.0, 1.0, 1.0]
view.OrientationAxesVisibility = 0
view.CenterAxesVisibility = 0
halos_array = ['on', 'off']
volume_array = ['on', 'off']
# -----------------------------------------------------------------------------
# Output configuration
# -----------------------------------------------------------------------------
title = "499-1 - Probe the Cosmic Structure of the Dark Universe"
description = """
In the standard model of cosmology, dark energy and dark matter
together account for 95 percent of the mass energy of the universe;
however, their ultimate origin remains a mystery. The Argonne
Leadership Computing Facility (ALCF) will allocate significant
supercomputing resources towards unraveling one of the key
puzzles-the nature of the dark energy causing the universe to
accelerate its current expansion rate.
"""
analysis = wx.AnalysisManager(output_working_dir, title, description)
id = 'magnitude-volume-halos-time'
title = 'Magnitude Volume + Halos/Time'
description = '''
Show the magnitude density using volume visualization with glyphed halos.
'''
analysis.register_analysis(id, title, description, '{time}/{halos}/{volume}/{volumeIdx}/{theta}_{phi}.jpg', "parametric-image-stack")
fng = analysis.get_file_name_generator(id)
exporter = wx.ThreeSixtyImageStackExporter(fng, view, center_of_rotation, distance, rotation_axis, angle_steps)
exporter.set_analysis(analysis)
# -----------------------------------------------------------------------------
# Pipeline configuration
# -----------------------------------------------------------------------------
# Volume
volume = XMLImageDataReader( PointArrayStatus=['SplatterValues'], FileName=magnitude_filenames )
volume_rep = Show(volume)
volume_rep.Representation = 'Volume'
outline = Outline(Input=volume)
outlineRep = Show(outline)
outlineRep.ColorArrayName = [None, '']
outlineRep.DiffuseColor = [0.0, 0.0, 0.0]
outlineRep.LineWidth = 1.0
halos_reader = XMLUnstructuredGridReader( FileName=halos_filenames )
glyph = Glyph(Input = halos_reader, GlyphType="Sphere", GlyphTransform="Transform2" )
glyph.Scalars = ['POINTS', 'magnitude']
glyph.ScaleFactor = 0.004
glyph.ScaleMode = 'scalar'
glyph.GlyphMode = 'All Points'
glyph.GlyphType.ThetaResolution = 16
glyph.GlyphType.PhiResolution = 16
glyph_rep = Show(glyph)
glyph_rep.Representation = 'Surface'
luts = {
"SplatterValues" : {
"range": [0.0, 500.0],
"colorBy": ('POINT_DATA', 'SplatterValues'),
"pwfunc": []
},
"magnitude" : {
"range": [25.0, 913.0],
"colorBy": ('POINT_DATA', 'magnitude'),
"pwfunc": []
},
}
buildLookupTables(luts)
volume_rep.LookupTable = luts['SplatterValues']["lut"]
volume_rep.ColorArrayName = luts['SplatterValues']["colorBy"]
glyph_rep.LookupTable = luts['magnitude']["lut"]
glyph_rep.ColorArrayName = luts['magnitude']["colorBy"]
# -----------------------------------------------------------------------------
# Batch processing
# -----------------------------------------------------------------------------
hatFunctions = createHatFunctions()
analysis.begin()
Render()
for time in range(0, len(file_times), 1):
GetAnimationScene().TimeKeeper.Time = float(time)
UpdatePipeline(time)
#dataRange = getTotalPointDataRange(volume, "SplatterValues")
dataRange = [ 0.0, 900.0 ]
print "Moving to timestep ",time,", new data range: ",dataRange
for halos in halos_array:
if halos == 'on':
glyph_rep.Visibility = 1
else:
glyph_rep.Visibility = 0
fng.update_active_arguments(halos=halos)
fng.update_label_arguments(halos="Halos")
for vlome in volume_array:
fng.update_active_arguments(volume=vlome)
fng.update_label_arguments(volume="Volume")
if vlome == 'on':
volume_rep.Visibility = 1
for volumeIdx in range(10):
curRange = dataRange[1] - dataRange[0]
xPoints = hatFunctions[volumeIdx][0]
yPoints = hatFunctions[volumeIdx][1]
pwfPoints = []
for i in range(len(xPoints)):
pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
pwfPoints.append(yPoints[i])
pwfPoints.append(0.5)
pwfPoints.append(0.0)
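# Each control point is an (x, opacity, midpoint, sharpness) quadruple as expected
# by ParaView piecewise functions; 0.5 and 0.0 are the default midpoint/sharpness.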
newPwf = CreatePiecewiseFunction(Points=pwfPoints)
volume_rep.ScalarOpacityFunction = newPwf
fng.update_active_arguments(volumeIdx=volumeIdx)
fng.update_label_arguments(volumeIdx="Idx")
exporter.UpdatePipeline(time)
else:
volume_rep.Visibility = 0
for volumeIdx in range(10):
fng.update_active_arguments(volumeIdx=volumeIdx)
fng.update_label_arguments(volumeIdx="Idx")
exporter.UpdatePipeline(time)
analysis.end()
|
Kitware/cinema
|
scripts/data_generation/cosmology/hacc_magnitude_halos_tent.py
|
Python
|
bsd-3-clause
| 11,003
|
[
"ParaView"
] |
5a6a425d5e244cc49655a04b9d30966aa6d81941605c509e61bfaadc6c3f355c
|
#_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2007)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
from math import sqrt
from sherpa.utils import SherpaTestCase
from sherpa.optmethods import optfcts
## from sherpa.optmethods import myoptfcts
from sherpa.optmethods import _tstoptfct
class test_optmethods(SherpaTestCase):
def setUp(self):
self.tolerance = 1.0e-2 #sqrt(optfcts.EPSILON)
self.mc = '_montecarlo'
self.nm = '_neldermead'
self.lm = '_lmdif'
self.verbose = False
def print_result( self, name, f, x, nfev ):
print '%s(%s) = %g in %d nfev' % (name, x, f, nfev)
def tst_all( self, name, fct, fmin, x0, xmin, xmax,
iprint=False ):
self.tst( optfcts.neldermead, name + self.nm, fct, fmin,
x0, xmin, xmax, iprint=iprint )
## self.tst( myoptfcts.nelder_mead, name + self.nm, fct, fmin,
## x0, xmin, xmax, iprint=iprint )
self.tst( optfcts.montecarlo, name + self.mc , fct, fmin,
x0, xmin, xmax, iprint=iprint, maxfev=8192*len(x0) )
self.tst( optfcts.lmdif, name + self.lm, fct, fmin,
x0, xmin, xmax, iprint=iprint )
def tst( self, optmethod, name, fct, fmin, x0, xmin, xmax,
maxfev=4096, iprint=False ):
status, x, fval, msg, stuff = optmethod( fct, x0, xmin, xmax, maxfev=maxfev*len(x0))
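# Each optimizer returns (status, best-fit parameters, best function value, message,
# info dict); 'nfev' in the info dict is the number of function evaluations used.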
nfev = stuff.get('nfev')
if iprint:
print 'fmin = %g vs fval = %g' % ( fmin, fval )
if self.verbose or iprint:
self.print_result( name, fval, x, nfev )
self.assertEqualWithinTol( fval, fmin, self.tolerance )
def test_rosenbrock(self):
name = 'rosenbrock'
npar = 4
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.rosenbrock, fmin, x0, xmin, xmax )
## def test_freudenstein_roth(self):
## name = 'freudenstein_roth'
## npar = 8
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## #self.tst_all( name, _tstoptfct.freudenstein_roth_fct,
## # _tstoptfct.freudenstein_roth,
## # fmin, x0, xmin, xmax )
def test_powell_badly_scaled(self):
name = 'powell_badly_scaled'
npar = 2
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst( optfcts.neldermead, name + self.nm,
_tstoptfct.powell_badly_scaled, fmin, x0, xmin, xmax )
self.tst( optfcts.montecarlo, name + self.mc,
_tstoptfct.powell_badly_scaled, fmin, x0, xmin, xmax )
def test_brown_badly_scaled(self):
name = 'brown_badly_scaled'
npar = 2
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.brown_badly_scaled, fmin, x0, xmin,
xmax )
def test_beale(self):
name = 'beale'
npar = 2
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name,_tstoptfct.beale, fmin, x0, xmin, xmax )
def test_jennrich_sampson(self):
name = 'jennrich_sampson'
npar = 2
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.jennrich_sampson, fmin, x0, xmin, xmax )
def test_helical_valley(self):
name = 'helical_valley'
npar = 3
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.helical_valley, fmin, x0, xmin, xmax )
def test_bard(self):
name = 'bard'
npar = 3
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.bard, fmin, x0, xmin, xmax )
def test_gaussian(self):
name = 'gaussian'
npar = 3
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst( optfcts.lmdif, name + self.lm, _tstoptfct.gaussian,
fmin, x0, xmin, xmax )
## # This test actually passed, there is a bug with assertEqualWithinTol
## def test_meyer(self):
## name = 'meyer'
## npar = 3
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## self.tst_all( name, _tstoptfct.meyer_fct, _tstoptfct.meyer,
## fmin, x0, xmin, xmax )
def test_gulf_research_development(self):
name = 'gulf_research_development'
npar = 3
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.gulf_research_development,
fmin, x0, xmin, xmax )
def test_box3d(self):
name = 'box3d'
npar = 3
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.box3d, fmin, x0, xmin, xmax )
## def test_powell_singular(self):
## name = 'powell_singular'
## npar = 4
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## self.tst_all( name, _tstoptfct.powell_singular_fct,
## _tstoptfct.powell_singular, fmin, x0, xmin, xmax )
def test_wood(self):
name = 'wood'
npar = 4
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst( optfcts.neldermead, name + self.nm, _tstoptfct.wood,
fmin, x0, xmin, xmax )
self.tst( optfcts.montecarlo, name + self.mc, _tstoptfct.wood,
fmin, x0, xmin, xmax )
def test_kowalik_osborne(self):
name = 'kowalik_osborne'
npar = 4
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.kowalik_osborne, fmin, x0, xmin, xmax )
def test_brown_dennis(self):
name = 'brown_dennis'
npar = 4
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.brown_dennis, fmin, x0, xmin, xmax )
## # NelderMead actually finds a lower minimum than the published result!
## def test_osborne1(self):
## name = 'osborne1'
## npar = 5
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## self.tst( optfcts.lmdif, name + self.lm, _tstoptfct.osborne1,
## fmin, x0, xmin, xmax )
## # look at why it fails for monte carlo
def test_biggs(self):
name = 'biggs'
npar = 6
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
#self.tst( optfcts.neldermead, name + self.nm, _tstoptfct.biggs_fct,
# fmin, x0, xmin, xmax )
self.tst( optfcts.lmdif, name + self.lm, _tstoptfct.biggs,
fmin, x0, xmin, xmax )
def test_osborne2(self):
name = 'osborne2'
npar = 11
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.osborne2, fmin, x0, xmin, xmax )
def test_watson(self):
name = 'watson'
npar = 6
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst( optfcts.neldermead, name + self.nm, _tstoptfct.watson,
fmin, x0, xmin, xmax )
self.tst( optfcts.montecarlo, name + self.mc, _tstoptfct.watson,
fmin, x0, xmin, xmax )
## # This test actually passed, there is a bug with assertEqualWithinTol
## def test_penaltyI(self):
## name = 'penaltyI'
## npar = 4
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## self.tst_all( name, _tstoptfct.penaltyI_fct,
## _tstoptfct.penaltyI, fmin, x0, xmin, xmax )
## def test_penaltyII(self):
## name = 'penaltyII'
## npar = 4
## x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
## ## mac(ppc only) has issue with this problem, by pass it for now.
## ## The problem is probably due to the compiler, will re-visit later.
## self.tst( optfcts.neldermead, name + self.nm, _tstoptfct.penaltyII,
## fmin, x0, xmin, xmax )
## self.tst( optfcts.montecarlo, name + self.mc, _tstoptfct.penaltyII,
## fmin, x0, xmin, xmax )
def test_variably_dimensioned(self):
name = 'variably_dimensioned'
npar = 5
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.variably_dimensioned, fmin, x0, xmin,
xmax )
def test_trigonometric(self):
name = 'trigonometric'
npar = 9
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst( optfcts.neldermead, name + self.nm,
_tstoptfct.trigonometric, fmin, x0, xmin, xmax )
self.tst( optfcts.montecarlo, name + self.mc,
_tstoptfct.trigonometric, fmin, x0, xmin, xmax )
# lmdif gets a smaller minimum than the published result
#def test_brown_almost_linear(self):
# name = 'brown_almost_linear'
# npar = 7
# x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
# self.tst( optfcts.lmdif, name + self.lm,
# _tstoptfct.brown_almost_linear, fmin, x0, xmin, xmax )
def test_discrete_boundary(self):
name = 'discrete_boundary'
npar = 5
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.discrete_boundary, fmin, x0, xmin,
xmax )
def test_discrete_integral(self):
name = 'discrete_integral'
npar = 5
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.discrete_integral, fmin, x0, xmin,
xmax )
def test_broyden_tridiagonal(self):
name = 'broyden_tridiagonal'
npar = 16
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.broyden_tridiagonal, fmin, x0,
xmin, xmax )
def test_broyden_banded(self):
name = 'broyden_banded'
npar = 18
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.broyden_banded, fmin, x0, xmin, xmax )
def test_linear_fullrank(self):
name = 'linear_fullrank'
npar = 18
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.linear_fullrank, fmin, x0, xmin, xmax )
def test_linear_fullrank1(self):
name = 'linear_fullrank1'
npar = 15
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.linear_fullrank1, fmin, x0, xmin, xmax )
def test_linear_fullrank0cols0rows(self):
name = 'linear_fullrank0cols0rows'
npar = 13
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.linear_fullrank0cols0rows, fmin, x0,
xmin, xmax )
def test_chebyquad(self):
name = 'chebyquad'
npar = 11
x0, xmin, xmax, fmin = _tstoptfct.init( name, npar )
self.tst_all( name, _tstoptfct.chebyquad, fmin, x0, xmin, xmax )
def tstme():
from sherpa.utils import SherpaTest
import sherpa.optmethods
SherpaTest(sherpa.optmethods).test()
#
# pushd ../../../ ; makeme ; popd ; python test_optmethods.py
#
if __name__ == '__main__':
tstme()
|
brefsdal/sherpa
|
sherpa/optmethods/tests/test_optmethods.py
|
Python
|
gpl-2.0
| 11,391
|
[
"Gaussian",
"VisIt"
] |
b06023cee5a9df92c21b1c0b28b46e44d8d34419d4639a682044cb118a7ce781
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import random
import numpy as np
import pandas as pd
from copy import copy
from pymatgen import Structure
def doc_from(structure, energy=None, force=None, stress=None):
"""
Convert a structure and its properties into the doc
format used for further processing. If a property is None, an
array of zeros is used instead.
Args:
structure (Structure): Pymatgen Structure object.
energy (float): The total energy of the structure.
force (np.array): The (m, 3) forces array of the structure
where m is the number of atoms in structure.
stress (list/np.array): The (6, ) stresses array of the
structure.
Returns:
(dict)
"""
# Use explicit None checks: truth-testing a multi-element numpy array
# raises "The truth value of an array ... is ambiguous".
energy = energy if energy is not None else 0.0
force = force if force is not None else np.zeros((len(structure), 3))
stress = stress if stress is not None else np.zeros(6)
outputs = dict(energy=energy, forces=force,
virial_stress=stress)
doc = dict(structure=structure.as_dict(),
num_atoms=len(structure),
outputs=outputs)
return doc
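# --- Editor's note: illustrative usage sketch, not part of the original module ---
# A minimal example of `doc_from`, assuming a hypothetical single-atom cubic
# structure; any pymatgen Structure with matching force/stress shapes works
# the same way:
#
#   from pymatgen import Lattice, Structure
#   s = Structure(Lattice.cubic(3.35), ['Po'], [[0.0, 0.0, 0.0]])
#   d = doc_from(s, energy=-1.23, force=[[0.0, 0.0, 0.0]], stress=[0.0] * 6)
#   # d['num_atoms'] == 1 and d['outputs']['energy'] == -1.23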
def pool_from(structures, energies=None, forces=None, stresses=None):
"""
Convert structures and their properties into the
datapool format.
Args:
structures ([Structure]): The list of Pymatgen Structure object.
energies ([float]): The list of total energies of each structure
in structures list.
forces ([np.array]): List of (m, 3) forces array of each structure
with m atoms in structures list. m can be varied with each
single structure case.
stresses (list): List of (6, ) virial stresses of each
structure in structures list.
Returns:
([dict])
"""
energies = energies if energies else [None] * len(structures)
forces = forces if forces else [None] * len(structures)
stresses = stresses if stresses else [None] * len(structures)
datapool = [doc_from(structure, energy, force, stress)
for structure, energy, force, stress
in zip(structures, energies, forces, stresses)]
return datapool
def convert_docs(docs, include_stress=False, **kwargs):
"""
Method to convert a list of docs into objects, e.g.,
Structure and DataFrame.
Args:
docs ([dict]): List of docs. Each doc should have the same
format as one returned from .dft.parse_dir.
include_stress (bool): Whether to include stress.
Returns:
A list of structures, and a DataFrame with the energy and force
data in the 'y_orig' column, the data type ('energy' or 'force') in
the 'dtype' column, and the number of atoms in the 'n' column for
rows of energy data ('n' is 1 for rows of force data).
"""
structures, y_orig, n, dtype = [], [], [], []
for d in docs:
if isinstance(d['structure'], dict):
structure = Structure.from_dict(d['structure'])
else:
structure = d['structure']
outputs = d['outputs']
force_arr = np.array(outputs['forces'])
assert force_arr.shape == (len(structure), 3), \
'Force array shape does not match the structure'
force_arr = force_arr.ravel()
if include_stress:
stress_arr = np.array(outputs['virial_stress'])
y = np.concatenate(([outputs['energy']], force_arr, stress_arr))
n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
dtype.extend(['energy'] + ['force'] * len(force_arr) + ['stress'] * 6)
else:
y = np.concatenate(([outputs['energy']], force_arr))
n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
dtype.extend(['energy'] + ['force'] * len(force_arr))
y_orig.append(y)
structures.append(structure)
df = pd.DataFrame(dict(y_orig=np.concatenate(y_orig), n=np.concatenate(n),
dtype=dtype))
for k, v in kwargs.items():
df[k] = v
return structures, df
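# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Pipeline from structures to a training DataFrame; `structures`, `energies`
# and `forces` are hypothetical inputs, and the extra keyword (here `tag`)
# simply becomes a constant column of the returned DataFrame:
#
#   docs = pool_from(structures, energies=energies, forces=forces)
#   train_structures, df = convert_docs(docs, include_stress=False, tag='train')
#   # df holds one 'energy' row per structure followed by 3 * n_atoms 'force' rows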
class MonteCarloSampler(object):
"""
Sample a subset from the dataset to satisfy some criterion.
For example, one may want a subset such that a fraction
of the data already covers a large part of the feature space,
i.e., the pairwise distances are maximized.
"""
def __init__(self, datasets, num_samples, cost_function):
"""
Sample a subset with size num_samples from datasets
to minimize the cost function.
Args:
datasets (numpy.array): The full dataset.
num_samples (int): Number of samples to draw from the data.
cost_function (function): Function that takes a subset
of the data and calculates a cost.
"""
self.datasets = datasets
self.num_samples = num_samples
self.cost_function = cost_function
self.num_total = len(datasets)
self.num_remain = self.num_total - num_samples
self.index_selected = list(np.random.choice(
self.num_total, num_samples, replace=False))
self._get_remain_index()
self.cost = self.compute_cost(self.datasets[self.index_selected, :])
self.accepted = 0
self.rejected = 0
self.cost_history = []
self.cost_history.append(self.cost)
def _get_remain_index(self):
self.index_remain = sorted(list(set(range(self.num_total)) -
set(self.index_selected)))
def compute_cost(self, data_subset):
"""
Compute the cost of data subsets.
Args:
data_subset (numpy.array): Data subset.
"""
return self.cost_function(data_subset)
def sample(self, num_attempts, t_init, t_final):
"""
Metropolis sampler. For every sampling attempt, one selected data entry is
swapped with one from the data reservoir and the cost difference dE is
evaluated. If dE < 0, the swap is accepted. If dE > 0, it is accepted with
probability exp(-dE / T), where T is an artificial temperature. One can
start with a relatively large T and then reduce it as the sampling
proceeds.
Args:
num_attempts (int): Number of sampling attempts.
t_init (float): Initial temperature.
t_final (float): Final temperature.
"""
temperatures = np.linspace(t_init, t_final, num_attempts)
for i in range(num_attempts):
temperature = temperatures[i]
index = random.choice(self.index_selected)
index_remain = random.choice(self.index_remain)
self.update(index, index_remain, temperature)
self.cost_history.append(self.cost)
def update(self, index, index_remain, temperature):
"""
Implement the data swap, if it is accepted.
Args:
index (int): The index of selected feature matrix
used for swapping.
index_remain (int): The index of remaining feature matrix
used for swapping.
temperature (float): Artificial temperature.
"""
new_selected = copy(self.index_selected)
new_selected.remove(index)
new_selected.append(index_remain)
cost_after_swap = self.compute_cost(self.datasets[new_selected, :])
d_cost = cost_after_swap - self.cost
accept = self.decision(d_cost, temperature)
if accept:
self.index_selected = copy(new_selected)
self._get_remain_index()
self.cost = cost_after_swap
else:
pass
def decision(self, d_cost, temperature):
"""
Decision on accepting the data swap.
Args:
d_cost (float): Cost difference of the proposed move.
temperature (float): Temperature.
"""
if d_cost < 0:
self.accepted += 1
return True
else:
p = np.exp(-d_cost / temperature)
p2 = np.random.rand(1)
if p2 < p:
self.accepted += 1
return True
else:
self.rejected += 1
return False
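# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Selecting 10 rows of a random feature matrix so that the mean pairwise
# distance is maximized (the cost is its negative); assumes scipy is available:
#
#   import numpy as np
#   from scipy.spatial.distance import pdist
#   data = np.random.rand(100, 3)
#   sampler = MonteCarloSampler(data, num_samples=10,
#                               cost_function=lambda subset: -pdist(subset).mean())
#   sampler.sample(num_attempts=500, t_init=1.0, t_final=0.01)
#   selected = data[sampler.index_selected]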
|
materialsvirtuallab/veidt
|
veidt/potential/processing.py
|
Python
|
bsd-3-clause
| 8,390
|
[
"pymatgen"
] |
159ac56acad87e4b6c3fb4e59407f62fc47e6bcace368d1cdc910486a19eeda1
|
#!/usr/bin/env python3
# Implementation of BEB model, see: [1]
# Electron-impact ionization cross sections for polyatomic molecules, radicals, and ions
# Yong-Ki Kim and Karl K. Irikura, 2000
# https://doi.org/10.1063/1.1336281
# The original paper:
# Binary-encounter-dipole model for electron-impact ionization
# Kim, Yong Ki, Rudd, M. Eugene Phys Rev A, 1994, 5, 3954-3967
# Cross sections for K-shell ionization of atoms by electron impact
# Santos, J P, Parente, F, Kim, Y-k, Journal of Physics B: Atomic, Molecular and Optical Physics, 2003
# WARNING: Current implementation works only for closed-shell molecules!
# A very simple model valid only for atoms from Talukder et al is also implemented.
# Empirical model for electron impact ionization cross sections
# M.R. Talukder et al The European Physical Journal D 46, 281-287, 2008
import math, re, sys
import argparse
AU2EV = 27.2114
ANG2BOHR = 1.889726132873
def read_cmd():
"""Reading from command line"""
desc = "Binary Encounter Bethe (BEB) model:\n \
electron impact ionization cross section from first principles"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-i", "--input_file", dest="inp_file", help="Gaussian output file with MO parameters.")
parser.add_argument("-m", "--model", dest="model",default="beb", help="Which model? (beb|talukder).")
parser.add_argument("-U", dest="U", type=float, help="electron orbital kinetic energy [ev]")
parser.add_argument("--Tmax", dest="Tmax", type=float, default=1000., help="maximum kin. energy of ionizing electron [ev]")
parser.add_argument("-T", dest="T",type=float, help="kinetic energy [ev] of the ionizing electron")
parser.add_argument("-B", dest="B",type=float, help="electron binding energy [ev]")
parser.add_argument("-N", dest="N",type=int, default=2, help="number of eletrons in the orbital")
parser.add_argument("-n", dest="n",type=int, help="Talukder model, principal quantum number")
parser.add_argument("-l", dest="l",type=int, help="Talukder model, azimuthal quantum number")
parser.add_argument("-c", "--charge", dest="charge",type=int, default=0, help="Charge")
return parser.parse_args()
# Implementation from:
# Electron-Impact Ionization Cross Sections for Polyatomic Molecules, Radicals, and Ions
# Kim, Yong-Ki, Irikura, Karl K.
# https://doi.org/10.1063/1.1336281
# Equation 3
def beb_cross_section(T, B, U, N, charge):
"""Calculates electron impact ionization cross section for a given MO.
Input params should be in atomic units!
T = kinetic energy of ionizing electron
B = electron binding energy (VIE)
U = orbital kinetic energy
N = electron occupation number of a given orbital"""
# Definitions from Section 1, pp. 221,
# between equations (2) and (3)
a0 = 1 # Bohr radius [au]
R = 0.5 # Rydberg energy [au]
t = T / B
u = U / B
denom = 1
if (charge == 1):
# modification for singly charged ions, see
# Section 2.2
denom = 2
S = 4 * math.pi * a0**2 * N * (R/B)**2
# BEB Equation (3), page 221
x1 = S / (t + (u + 1)/denom)
x2 = math.log(t) / 2 * (1 - 1 / t**2)
x3 = 1 - 1/t - math.log(t)/(1+t)
sigma_BEB = x1 * (x2 + x3)
return sigma_BEB
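# --- Editor's note: illustrative usage sketch, not part of the original file ---
# BEB cross section for neutral H2 at an incident energy of 100 eV, using the
# B and U values quoted in the commented example further below (all quantities
# converted to atomic units first):
#
#   T = 100.0 / AU2EV
#   B = 15.43 / AU2EV
#   U = 15.98 / AU2EV
#   sigma_au = beb_cross_section(T, B, U, N=2, charge=0)
#   sigma_ang2 = sigma_au / ANG2BOHR**2   # bohr^2 -> Angstrom^2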
def talukder_Anl(B, n, l):
"""Implements equations 3 on page 282
B = ionization energy
n = principal quantum number
l = azimuthal quantum number"""
R = 0.5 # Rydberg energy in Atomic units
Ur = B / R
if n == 1:
if l == 0:
Anl = 3.97 * 10**(-11) * Ur / (1+20.74*Ur)**(3.6)
else:
Anl = 3.88 * 10**(-14) * Ur / (1+6.96*Ur)**3
else:
if l == 0:
Anl = 9.14*10**(-11)*Ur / (1+68.32*Ur)**3
else:
Anl = 1.22 * 10**(-6) * Ur / (1+566.46*Ur)**(3.5)
# Convert from cm**2 to atomic units
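# (1 cm**2 = 1e16 Angstrom**2, hence the factor 10**16; ANG2BOHR**2 then
# converts Angstrom**2 to bohr**2, i.e. atomic units of area)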
return Anl * 10**16 * ANG2BOHR**2
def talukder_Bnl(B, n, l):
"""Implements equations 3 on page 282
B = ionization energy
n = principal quantum number
l = azimuthal quantum number"""
R = 0.5 # Rydberg energy in Atomic units
Ur = B / R
if n == 1:
if l == 0:
Bnl = 2.29 * 10**(-10) * Ur / (1+39.9*Ur)**(3.6)
else:
Bnl = 4.36 * 10**(-16) * Ur / (1+0.33*Ur)**8
else:
if l == 0:
Bnl = 3.83 * 10**(-11) * Ur / (1+60.95*Ur)**3
else:
Bnl = - 4.39 * 10**(-9) * Ur / (1+102.87*Ur)**3.7
# Convert from cm**2 to atomic units
return Bnl * 10**16 * ANG2BOHR**2
def talukder_cross_section(T, B, n, l, N):
"""Calculates electron impact ionization cross section for a given MO.
based on simple model of Talukder et al
Input params should be in atomic units!
T = kinetic energy of ionizing electron
B = electron binding energies of atomic orbitals (VIE)
N = electron occupation number of a given orbital"""
# for n in range(1,8):
# for l in range(n):
# for i in range(2*l+1):
Anl = talukder_Anl(B, n, l)
Bnl = talukder_Bnl(B, n, l)
sigma = Anl*math.log(T/B)+Bnl*(1-B/T)
sigma = sigma * B * N / T
return sigma
def parse_gaussian(infile, E_orb, Ekin_orb):
"""Parses Gaussian output and extracts
MO binding energies and MO kinetic energies"""
# This needs to be matched
# Orbital energies and kinetic energies (alpha):
# 1 O -19.001985 29.005918
start_line = ' Orbital energies and kinetic energies (alpha):\n'
dec = r' +-?\d+\.\d+' # regex matching decimal numbers
reg = re.compile(r'^ +[0-9]+ +O'+dec+dec+r'$')
with open(infile,"r") as f:
read = False
for line in f:
# This is because of false positives in the beginning of log file
# i.e. we do not want to match oxygen atom in Z-matrix
if line == start_line:
read = True
res = reg.search(line)
if res and read:
E_orb.append(-float(line.split()[2]))
Ekin_orb.append(float(line.split()[3]))
# Print the parsed value for the user to check
print("# MO binding energies read from G09 output")
print("#", E_orb)
print("# MO kinetic energies read from G09 output")
print("#", Ekin_orb)
if __name__ == "__main__":
help_me = "Use \"-h\" to get help"
opts = read_cmd()
# Let's try H2+
#B = 30 / AU2EV
#U = 16.4 / AU2EV
#N = 1
#charge = +1
# neutral H2
#B = 15.43 / AU2EV
#U = 15.98 / AU2EV
#N = 2
#charge = 0
Ekin = []
Eorb = []
if not opts.inp_file and (not opts.B and not opts.U):
print("ERROR: You did not provide Gaussian output file as a parameter")
print("Alternatively, you could provide B and U parameters")
print(help_me)
sys.exit(1)
if opts.inp_file:
parse_gaussian(opts.inp_file, Eorb, Ekin)
else:
Eorb.append(opts.B / AU2EV)
Ekin.append(opts.U / AU2EV)
N = opts.N
if opts.charge != 0 and opts.charge != 1:
print("ERROR: Charge must be 0 or 1, other values are not supported!")
sys.exit(1)
Ts = [] # Calculate cross sections for these incident kinetic energies
if opts.T:
Ts.append(opts.T / AU2EV)
else:
Ts = [x/AU2EV for x in range(int(Eorb[-1]*AU2EV), int(opts.Tmax) ) ]
print("# Incident electron energy [eV] | Total Sigma | Sigmas [Angstrom^2] (core electrons first)")
for t in Ts:
sigma = []
total_sigma = 0
# explicit None checks: l == 0 (an s orbital) is a valid value
if opts.n is not None and opts.l is not None:
n = opts.n
l = opts.l
m_l = -l
else:
n = 1
l = 0
m_l = 0
# Iterate over orbitals
for i in range(len(Eorb)):
if Eorb[i] <= t:
if opts.model == "beb":
s = beb_cross_section(t, Eorb[i], Ekin[i], N, opts.charge)
elif opts.model == "talukder":
s = talukder_cross_section(t, Eorb[i], n, l, N)
else:
print("ERROR: Invalid model!")
print(help_me)
sys.exit(1)
else:
s = 0
sigma.append(s / ANG2BOHR / ANG2BOHR)
total_sigma += sigma[-1]
# Aufbau principle (needed for Talukder model)
if m_l == l:
l += 1
m_l = -l
else:
m_l += 1
if l == n:
l = 0
n += 1
m_l = 0
print(t*AU2EV, total_sigma, " ".join(str(s) for s in sigma))
|
PHOTOX/photoxrepo
|
MISC/BEB/beb.py
|
Python
|
mit
| 8,370
|
[
"Gaussian"
] |
99044094f5b77da33d66d2f35d624a65e95e5fca2f250a351d2de6a43decf85c
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2016 Sylvain Taverne <taverne.sylvain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
import sys
from subprocess import Popen
# Import from itools
from itools.fs.lfs import LocalFolder
from itools.uri import get_uri_name, Path
class GulpBuilder(object):
"""
Run "gulp build" in project's repository & add generated files
$ ui/{SKINS}/*
into the project MANIFEST file.
That allow to avoid commit compiled JS/CSS files into GIT.
"""
def __init__(self, package_root, worktree, manifest):
self.package_root = package_root
if self.package_root != '.':
self.ui_path = '{0}/ui/'.format(self.package_root)
else:
self.ui_path = 'ui/'
self.worktree = worktree
self.manifest = manifest
self.fs = LocalFolder('.')
if self.fs.is_folder(self.ui_path):
self.dist_folders = tuple(['{0}{1}'.format(self.ui_path, x)
for x in LocalFolder(self.ui_path).get_names()])
def run(self):
npm_done = self.launch_npm_install()
gulp_done = self.launch_gulp_build()
webpack_done = self.launch_webpack()
# Add DIST files into manifest
if (npm_done or gulp_done or webpack_done) and self.fs.exists(self.ui_path):
for path in self.fs.traverse(self.ui_path):
relative_path = self.fs.get_relative_path(path)
if (relative_path and
relative_path.startswith(self.dist_folders) and self.fs.is_file(path)):
self.manifest.add(relative_path)
def launch_npm_install(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'package.json':
print '***'*25
print '*** Run $ npm install on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['npm', 'install'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running npm install ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_gulp_build(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'gulpfile.js':
print '***'*25
print '*** Run $ gulp build on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['./node_modules/.bin/gulp', 'build'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running gulp ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_webpack(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'webpack.config.js':
print '***'*25
print '*** Run $ webpack ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['./node_modules/.bin/webpack', '--mode=production'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running webpack ', path
print '***'*25
sys.exit(1)
done = True
return done
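# --- Editor's note: illustrative usage sketch, not part of the original module ---
# Typical invocation from a packaging step; `worktree` and `manifest` are
# assumptions for illustration -- `manifest` must be an iterable of relative
# paths that also supports .add(), as used by run() above:
#
#   builder = GulpBuilder(package_root='.', worktree=worktree, manifest=manifest)
#   builder.run()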
|
hforge/itools
|
itools/pkg/build_gulp.py
|
Python
|
gpl-3.0
| 4,312
|
[
"GULP"
] |
997d40931f2b393e09bba9fea6b10455a91043e64a027e1b3119f940cd61bc1e
|
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
from unittest import TestCase
import requests
from mock import (
patch,
)
from httmock import (
HTTMock,
urlmatch,
response
)
import macaroonbakery.httpbakery as httpbakery
ID_PATH = 'http://example.com/someprotecteurl'
json_macaroon = {
u'identifier': u'macaroon-identifier',
u'caveats': [
{
u'cl': u'http://example.com/identity/v1/discharger',
u'vid': u'zgtQa88oS9UF45DlJniRaAUT4qqHhLxQzCeUU9N2O1Uu-'
u'yhFulgGbSA0zDGdkrq8YNQAxGiARA_-AGxyoh25kiTycb8u47pD',
u'cid': u'eyJUaGlyZFBhcnR5UHV'
}, {
u'cid': u'allow read-no-terms write'
}, {
u'cid': u'time-before 2158-07-19T14:29:14.312669464Z'
}],
u'location': u'charmstore',
u'signature': u'52d17cb11f5c84d58441bc0ffd7cc396'
u'5115374ce2fa473ecf06265b5d4d9e81'
}
discharge_token = [{
u'identifier': u'token-identifier===',
u'caveats': [{
u'cid': u'declared username someone'
}, {
u'cid': u'time-before 2158-08-15T15:55:52.428319076Z'
}, {
u'cid': u'origin '
}],
u'location': u'https://example.com/identity',
u'signature': u'5ae0e7a2abf806bdd92f510fcd3'
u'198f520691259abe76ffae5623dae048769ef'
}]
discharged_macaroon = {
u'identifier': u'discharged-identifier=',
u'caveats': [{
u'cid': u'declared uuid a1130b10-3deb-59b7-baf0-c2a3f83e7382'
}, {
u'cid': u'declared username someone'
}, {
u'cid': u'time-before 2158-07-19T15:55:52.432439055Z'
}],
u'location': u'',
u'signature': u'3513db5503ab17f9576760cd28'
u'ce658ce8bf6b43038255969fc3c1cd8b172345'
}
@urlmatch(path='.*/someprotecteurl')
def first_407_then_200(url, request):
if request.headers.get('cookie', '').startswith('macaroon-'):
return {
'status_code': 200,
'content': {
'Value': 'some value'
}
}
else:
resp = response(status_code=407,
content={
'Info': {
'Macaroon': json_macaroon,
'MacaroonPath': '/',
'CookieNameSuffix': 'test'
},
'Message': 'verification failed: no macaroon '
'cookies in request',
'Code': 'macaroon discharge required'
},
headers={'Content-Type': 'application/json'})
return request.hooks['response'][0](resp)
@urlmatch(netloc='example.com:8000', path='.*/someprotecteurl')
def first_407_then_200_with_port(url, request):
if request.headers.get('cookie', '').startswith('macaroon-'):
return {
'status_code': 200,
'content': {
'Value': 'some value'
}
}
else:
resp = response(status_code=407,
content={
'Info': {
'Macaroon': json_macaroon,
'MacaroonPath': '/',
'CookieNameSuffix': 'test'
},
'Message': 'verification failed: no macaroon '
'cookies in request',
'Code': 'macaroon discharge required'
},
headers={'Content-Type': 'application/json'},
request=request)
return request.hooks['response'][0](resp)
@urlmatch(path='.*/someprotecteurl')
def valid_200(url, request):
return {
'status_code': 200,
'content': {
'Value': 'some value'
}
}
@urlmatch(path='.*/discharge')
def discharge_200(url, request):
return {
'status_code': 200,
'content': {
'Macaroon': discharged_macaroon
}
}
@urlmatch(path='.*/discharge')
def discharge_401(url, request):
return {
'status_code': 401,
'content': {
'Code': 'interaction required',
'Info': {
'VisitURL': 'http://example.com/visit',
'WaitURL': 'http://example.com/wait'
}
},
'headers': {
'WWW-Authenticate': 'Macaroon'
}
}
@urlmatch(path='.*/wait')
def wait_after_401(url, request):
if request.url != 'http://example.com/wait':
return {'status_code': 500}
return {
'status_code': 200,
'content': {
'DischargeToken': discharge_token,
'Macaroon': discharged_macaroon
}
}
@urlmatch(path='.*/wait')
def wait_on_error(url, request):
return {
'status_code': 500,
'content': {
'DischargeToken': discharge_token,
'Macaroon': discharged_macaroon
}
}
class TestBakery(TestCase):
def assert_cookie_security(self, cookies, name, secure):
for cookie in cookies:
if cookie.name == name:
assert cookie.secure == secure
break
else:
assert False, 'no cookie named {} found in jar'.format(name)
def test_discharge(self):
client = httpbakery.Client()
with HTTMock(first_407_then_200), HTTMock(discharge_200):
resp = requests.get(ID_PATH,
cookies=client.cookies,
auth=client.auth())
resp.raise_for_status()
assert 'macaroon-test' in client.cookies.keys()
self.assert_cookie_security(client.cookies, 'macaroon-test',
secure=False)
@patch('webbrowser.open')
def test_407_then_401_on_discharge(self, mock_open):
client = httpbakery.Client()
with HTTMock(first_407_then_200), HTTMock(discharge_401), \
HTTMock(wait_after_401):
resp = requests.get(
ID_PATH,
cookies=client.cookies,
auth=client.auth(),
)
resp.raise_for_status()
mock_open.assert_called_once_with(u'http://example.com/visit', new=1)
assert 'macaroon-test' in client.cookies.keys()
@patch('webbrowser.open')
def test_407_then_error_on_wait(self, mock_open):
client = httpbakery.Client()
with HTTMock(first_407_then_200), HTTMock(discharge_401),\
HTTMock(wait_on_error):
with self.assertRaises(httpbakery.InteractionError) as exc:
requests.get(
ID_PATH,
cookies=client.cookies,
auth=client.auth(),
)
self.assertEqual(str(exc.exception),
'cannot start interactive session: cannot get '
'http://example.com/wait')
mock_open.assert_called_once_with(u'http://example.com/visit', new=1)
def test_407_then_no_interaction_methods(self):
client = httpbakery.Client(interaction_methods=[])
with HTTMock(first_407_then_200), HTTMock(discharge_401):
with self.assertRaises(httpbakery.InteractionError) as exc:
requests.get(
ID_PATH,
cookies=client.cookies,
auth=client.auth(),
)
self.assertEqual(str(exc.exception),
'cannot start interactive session: interaction '
'required but not possible')
def test_407_then_unknown_interaction_methods(self):
class UnknowInteractor(httpbakery.Interactor):
def kind(self):
return 'unknown'
client = httpbakery.Client(interaction_methods=[UnknowInteractor()])
with HTTMock(first_407_then_200), HTTMock(discharge_401):
with self.assertRaises(httpbakery.InteractionError) as exc:
requests.get(
ID_PATH,
cookies=client.cookies,
auth=client.auth(),
)
self.assertEqual(str(exc.exception),
'cannot start interactive session: no methods '
'supported')
def test_cookie_with_port(self):
client = httpbakery.Client()
with HTTMock(first_407_then_200_with_port):
with HTTMock(discharge_200):
resp = requests.get('http://example.com:8000/someprotecteurl',
cookies=client.cookies,
auth=client.auth())
resp.raise_for_status()
assert 'macaroon-test' in client.cookies.keys()
def test_secure_cookie_for_https(self):
client = httpbakery.Client()
with HTTMock(first_407_then_200_with_port), HTTMock(discharge_200):
resp = requests.get(
'https://example.com:8000/someprotecteurl',
cookies=client.cookies,
auth=client.auth())
resp.raise_for_status()
assert 'macaroon-test' in client.cookies.keys()
self.assert_cookie_security(client.cookies, 'macaroon-test',
secure=True)
|
fabricematrat/py-macaroon-bakery
|
macaroonbakery/tests/test_bakery.py
|
Python
|
lgpl-3.0
| 9,500
|
[
"VisIt"
] |
4c22d42663aad20cc084bbd35ae8b4cb333916468dedc144a14f304c5c103514
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scf.scf."""
import os
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from pyscf.lib import parameters
import tensorflow.compat.v1 as tf
from symbolic_functionals.syfes.scf import scf
from symbolic_functionals.syfes.symbolic import xc_functionals
from symbolic_functionals.syfes.xc import mgga
from symbolic_functionals.syfes.xc import utils
from symbolic_functionals.syfes.xc import xc
jax.config.update('jax_enable_x64', True)
class SCFTest(parameterized.TestCase):
def setUp(self):
super().setUp()
parameters.TMPDIR = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)
def test_parse_xyz(self):
xyz_path = os.path.join(flags.FLAGS.test_tmpdir, 'test.xyz')
with tf.io.gfile.GFile(xyz_path, 'w') as f:
f.write('\n0 1\nO 0. 0. 0.\nH 0. -0.757 0.587\nH 0. 0.757 0.587\n')
atom, charge, spin = scf.parse_xyz(xyz_path)
self.assertLen(atom.split(';'), 3)
self.assertEqual(charge, 0)
self.assertEqual(spin, 0)
# expected values for Etot, Exc and Exx are computed by external PySCF
@parameterized.parameters(
('pbe,pbe', 0, 0, (17812,), (6, 17812), {
'Etot': -1.1601348451265638,
'Exc': -0.6899737187913197,
'Exx': -0.6583555393862027,
'Exxlr': 0.0,
'Enlc': 0.0
}),
('pbe,pbe', -1, 1, (20048,), (2, 6, 20048), {
'Etot': -1.0336723063997342,
'Exc': -0.8723776781828819,
'Exx': -0.8141180655850809,
'Exxlr': 0.0,
'Enlc': 0.0
}),
('wb97m_v', 0, 0, (17812,), (6, 17812), {
'Etot': -1.1537971220466094,
'Exc': -0.6829720417857192,
'Exx': -0.6577521311181448,
'Exxlr': -0.29729171800068577,
'Enlc': 0.00891190761270658
}),
('wb97m_v', -1, 1, (20048,), (2, 6, 20048), {
'Etot': -1.007161017404796,
'Exc': -0.8544883116165207,
'Exx': -0.8220018420576689,
'Exxlr': -0.40928226576050875,
'Enlc': 0.012704771979755908
}),
)
def test_scf_calculation_with_pyscf(self, xc_name, charge, spin,
expected_weights_shape,
expected_rho_shape, expected_energies):
res = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc=xc_name,
basis='def2svpd')
self.assertCountEqual(
list(res.keys()),
scf.SCF_SCALAR_RESULTS + ['rho', 'weights'])
self.assertTrue(res['converged'])
self.assertEqual(res['weights'].shape, expected_weights_shape)
self.assertEqual(res['rho'].shape, expected_rho_shape)
for energy in ['Etot', 'Exc', 'Exx', 'Exxlr', 'Enlc']:
np.testing.assert_allclose(res[energy], expected_energies[energy])
@parameterized.parameters(
('lda', 'lda_x,lda_c_pw', 0, 0),
('lda', 'lda_x,lda_c_pw', -1, 1),
('pbe', 'pbe,pbe', 0, 0),
('pbe', 'pbe,pbe', -1, 1),
('b97', 'hyb_gga_xc_b97', 0, 0),
('b97', 'hyb_gga_xc_b97', -1, 1),
('wb97x_v', 'wb97x_v', 0, 0),
('wb97x_v', 'wb97x_v', -1, 1),
('b97m_v', 'b97m_v', 0, 0),
('b97m_v', 'b97m_v', -1, 1),
('wb97m_v', 'wb97m_v', 0, 0),
('wb97m_v', 'wb97m_v', -1, 1),
)
def test_scf_calculation_with_custom_xc_default_params(
self, xc_name, xc_name_libxc, charge, spin):
hybrid_coeff, rsh_params = utils.get_hybrid_rsh_params(xc_name)
res_libxc = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc=xc_name_libxc,
basis='def2svpd')
res_custom = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc=xc_name,
xc_fun=xc.make_eval_xc(xc_name),
hybrid_coeff=hybrid_coeff,
rsh_params=rsh_params,
basis='def2svpd')
for energy in ['Etot', 'Exc', 'Exx', 'Exxlr', 'Enlc']:
self.assertAlmostEqual(res_libxc[energy], res_custom[energy], delta=2e-6)
@parameterized.parameters((0, 0), (-1, 1),)
def test_scf_calculation_with_custom_xc_custom_params(self, charge, spin):
hybrid_coeff, rsh_params = utils.get_hybrid_rsh_params('b97m_v')
res_libxc = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc='b97m_v',
basis='def2svpd')
res_custom = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc='b97m_v',
xc_fun=xc.make_eval_xc('wb97m_v', params=mgga.B97MV_PARAMS),
hybrid_coeff=hybrid_coeff,
rsh_params=rsh_params,
basis='def2svpd')
for energy in ['Etot', 'Exc', 'Exx', 'Exxlr', 'Enlc']:
self.assertAlmostEqual(res_libxc[energy], res_custom[energy], delta=2e-6)
@parameterized.parameters((0, 0), (-1, 1),)
def test_scf_calculation_with_symbolic_functional(self, charge, spin):
hybrid_coeff, rsh_params = utils.get_hybrid_rsh_params('wb97m_v')
res_libxc = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc='wb97m_v',
basis='def2svpd')
res_custom = scf.run_scf_for_mol(
atom='H 0. 0. 0.;H 0. 0. 0.74',
charge=charge,
spin=spin,
xc='wb97m_v',
xc_fun=xc_functionals.wb97mv_short.make_eval_xc(
omega=rsh_params[0],
**xc_functionals.WB97MV_PARAMETERS_UTRANSFORM),
hybrid_coeff=hybrid_coeff,
rsh_params=rsh_params,
basis='def2svpd')
for energy in ['Etot', 'Exc', 'Exx', 'Exxlr', 'Enlc']:
self.assertAlmostEqual(res_libxc[energy], res_custom[energy], delta=2e-6)
if __name__ == '__main__':
absltest.main()
|
google-research/google-research
|
symbolic_functionals/syfes/scf/scf_test.py
|
Python
|
apache-2.0
| 6,472
|
[
"PySCF"
] |
0346b45ba66594598027118c0a4f2851541d1a20918d98ce379df90bb06d6afa
|
#!/usr/bin/env python
import os
from optparse import OptionParser
from ase.io.trajectory import print_trajectory_info
from ase.io.bundletrajectory import print_bundletrajectory_info
description = 'Print summary of information from trajectory files.'
def main():
p = OptionParser(usage='%prog file.traj [file2.traj ...]',
description=description)
opts, args = p.parse_args()
if len(args) == 0:
p.error('incorrect number of arguments')
for f in args:
if os.path.isfile(f):
print_trajectory_info(f)
elif os.path.isdir(f):
print_bundletrajectory_info(f)
else:
p.error('%s is neither a file nor a directory!' % f)
|
grhawk/ASE
|
tools/ase/cli/info.py
|
Python
|
gpl-2.0
| 667
|
[
"ASE"
] |
b4caad3e64f11525a89a36b7a110d64e9bf5618e2900a842a76410cc76c43e7f
|
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django import forms
from django.shortcuts import render
from scrape.models import Article
from scrape.models import Rating
import collections
import random
import results
def index(request):
return HttpResponse("""
<p>
Welcome to Randart, a site for exploring new and exciting content.
</p>
<a href=/rate> Begin by rating some articles </a>
""")
def inspect(request):
ss = results.stratifiedSample(10)
## create a dict with lists of article objects corresponding to the sample.
rs = {}
for label in ss.keys():
rs[label] = Article.objects.filter(id__in=ss[label])
## present sample using a template.
template = loader.get_template('scrape/inspect.html')
context = RequestContext(request, {'rs': rs})
return HttpResponse(template.render(context))
## transform ratings in the form format to a numeric format.
def transform_rating(rating):
if rating == 'on':
r = 1
else:
r = 0
return r
def rate(request):
if request.method == 'POST':
form = request.POST
## get the ratings.
## ratings are in the form ('rating_label.id_article.id', 'on'),
## for labels with checked checkboxes.
## So, split out the article and label id,
## so it can be used in creating Ratings below.
ks = form.items()
ks = [(k.split('_'),v) for (k,v) in ks if 'rating_' in k]
## store ratings in database so they can be used in recommend view.
rs = [Rating(label=int(k[1]), rating=transform_rating(v)) for (k,v) in ks]
Rating.objects.all().delete()
Rating.objects.bulk_create(rs)
return HttpResponseRedirect('/recommend')
else:
ss = results.stratifiedSample(1)
rs = {}
for label in ss.keys():
rs[label] = Article.objects.filter(id__in=ss[label])[0]
return render(request, 'scrape/rate.html', {'rs': rs})
def recommend(request):
lbls = [i.label for i in Rating.objects.filter(rating=1)]
ss = results.stratifiedSample(1)
to_rm = [k for k in ss.keys() if k not in lbls]
for k in to_rm:
del(ss[k])
rs = {}
for label in ss.keys():
rs[label] = Article.objects.filter(id__in=ss[label])
template = loader.get_template('scrape/inspect.html')
context = RequestContext(request, {'rs': rs})
# return HttpResponse(str(to_rm))
return HttpResponse(template.render(context))
|
ajerneck/rand-art
|
randart/scrape/views.py
|
Python
|
gpl-2.0
| 2,562
|
[
"exciting"
] |
906d82dc08c8fb602e4bf27508c0211c657ab3273195bdfe9827cb34e91452ca
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import gobject
import gtk, os, pango, sys
from zeroinstall import _
from zeroinstall.injector import model, writer
from zeroinstall import support
from zeroinstall.gtkui import gtkutils
import utils
def _build_stability_menu(impl):
menu = gtk.Menu()
upstream = impl.upstream_stability or model.testing
choices = list(model.stability_levels.values())
choices.sort()
choices.reverse()
def set(new):
if isinstance(new, model.Stability):
impl.user_stability = new
else:
impl.user_stability = None
writer.save_feed(impl.feed)
import main
main.recalculate()
item = gtk.MenuItem()
item.set_label(_('Unset (%s)') % _(str(upstream).capitalize()).lower())
item.connect('activate', lambda item: set(None))
item.show()
menu.append(item)
item = gtk.SeparatorMenuItem()
item.show()
menu.append(item)
for value in choices:
item = gtk.MenuItem()
item.set_label(_(str(value)).capitalize())
item.connect('activate', lambda item, v = value: set(v))
item.show()
menu.append(item)
return menu
rox_filer = 'http://rox.sourceforge.net/2005/interfaces/ROX-Filer'
# Columns
ITEM = 0
ARCH = 1
STABILITY = 2
VERSION = 3
FETCH = 4
UNUSABLE = 5
RELEASED = 6
NOTES = 7
WEIGHT = 8 # Selected item is bold
LANGS = 9
def get_tooltip_text(config, interface, impl):
if impl.local_path:
return _("Local: %s") % impl.local_path
if impl.id.startswith('package:'):
return _("Native package: %s") % impl.id.split(':', 1)[1]
if impl.is_available(config.stores):
return _("Cached: %s") % config.stores.lookup_any(impl.digests)
src = config.fetcher.get_best_source(impl)
if src:
size = support.pretty_size(src.size)
return _("Not yet downloaded (%s)") % size
else:
return _("No downloads available!")
class ImplementationList:
tree_view = None
model = None
interface = None
driver = None
def __init__(self, driver, interface, widgets):
self.interface = interface
self.driver = driver
self.model = gtk.ListStore(object, str, str, str, # Item, arch, stability, version,
str, gobject.TYPE_BOOLEAN, str, str, # fetch, unusable, released, notes,
int, str) # weight, langs
self.tree_view = widgets.get_widget('versions_list')
self.tree_view.set_model(self.model)
text = gtk.CellRendererText()
text_strike = gtk.CellRendererText()
stability = gtk.TreeViewColumn(_('Stability'), text, text = STABILITY)
for column in (gtk.TreeViewColumn(_('Version'), text_strike, text = VERSION, strikethrough = UNUSABLE, weight = WEIGHT),
gtk.TreeViewColumn(_('Released'), text, text = RELEASED, weight = WEIGHT),
stability,
gtk.TreeViewColumn(_('Fetch'), text, text = FETCH, weight = WEIGHT),
gtk.TreeViewColumn(_('Arch'), text_strike, text = ARCH, strikethrough = UNUSABLE, weight = WEIGHT),
gtk.TreeViewColumn(_('Lang'), text_strike, text = LANGS, strikethrough = UNUSABLE, weight = WEIGHT),
gtk.TreeViewColumn(_('Notes'), text, text = NOTES, weight = WEIGHT)):
self.tree_view.append_column(column)
self.tree_view.set_property('has-tooltip', True)
def tooltip_callback(widget, x, y, keyboard_mode, tooltip):
x, y = self.tree_view.convert_widget_to_bin_window_coords(x, y)
pos = self.tree_view.get_path_at_pos(x, y)
if pos:
self.tree_view.set_tooltip_cell(tooltip, pos[0], None, None)
path = pos[0]
row = self.model[path]
if row[ITEM]:
tooltip.set_text(get_tooltip_text(driver.config, interface, row[ITEM]))
return True
return False
self.tree_view.connect('query-tooltip', tooltip_callback)
def button_press(tree_view, bev):
if bev.button not in (1, 3):
return False
pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
if not pos:
return False
path, col, x, y = pos
impl = self.model[path][ITEM]
global menu # Fix GC problem with PyGObject
menu = gtk.Menu()
stability_menu = gtk.MenuItem()
stability_menu.set_label(_('Rating'))
stability_menu.set_submenu(_build_stability_menu(impl))
stability_menu.show()
menu.append(stability_menu)
if not impl.id.startswith('package:') and impl.is_available(self.driver.config.stores):
def open():
os.spawnlp(os.P_WAIT, '0launch',
'0launch', rox_filer, '-d',
impl.local_path or self.driver.config.stores.lookup_any(impl.digests))
item = gtk.MenuItem()
item.set_label(_('Open cached copy'))
item.connect('activate', lambda item: open())
item.show()
menu.append(item)
item = gtk.MenuItem()
item.set_label(_('Explain this decision'))
item.connect('activate', lambda item: self.show_explaination(impl))
item.show()
menu.append(item)
if sys.version_info[0] < 3:
menu.popup(None, None, None, bev.button, bev.time)
else:
menu.popup(None, None, None, None, bev.button, bev.time)
self.tree_view.connect('button-press-event', button_press)
def show_explaination(self, impl):
reason = self.driver.solver.justify_decision(self.driver.requirements, self.interface, impl)
gtkutils.show_message_box(self.tree_view.get_toplevel(), reason, gtk.MESSAGE_INFO)
def get_selection(self):
return self.tree_view.get_selection()
def set_items(self, items):
self.model.clear()
selected = self.driver.solver.selections.get(self.interface, None)
for item, unusable in items:
new = self.model.append()
self.model[new][ITEM] = item
self.model[new][VERSION] = item.get_version()
self.model[new][RELEASED] = item.released or "-"
self.model[new][FETCH] = utils.get_fetch_info(self.driver.config, item)
if item.user_stability:
if item.user_stability == model.insecure:
self.model[new][STABILITY] = _('INSECURE')
elif item.user_stability == model.buggy:
self.model[new][STABILITY] = _('BUGGY')
elif item.user_stability == model.developer:
self.model[new][STABILITY] = _('DEVELOPER')
elif item.user_stability == model.testing:
self.model[new][STABILITY] = _('TESTING')
elif item.user_stability == model.stable:
self.model[new][STABILITY] = _('STABLE')
elif item.user_stability == model.packaged:
self.model[new][STABILITY] = _('PACKAGED')
elif item.user_stability == model.preferred:
self.model[new][STABILITY] = _('PREFERRED')
else:
self.model[new][STABILITY] = _(str(item.upstream_stability) or str(model.testing))
self.model[new][ARCH] = item.arch or _('any')
if selected is item:
self.model[new][WEIGHT] = pango.WEIGHT_BOLD
else:
self.model[new][WEIGHT] = pango.WEIGHT_NORMAL
self.model[new][UNUSABLE] = bool(unusable)
self.model[new][LANGS] = item.langs or '-'
self.model[new][NOTES] = unusable and _(unusable) or _('None')
def clear(self):
self.model.clear()
|
timdiels/0install
|
zeroinstall/0launch-gui/impl_list.py
|
Python
|
lgpl-2.1
| 6,820
|
[
"VisIt"
] |
0bf5a9d80df3e9cdb0a91a6242ae9cfbef780969933ea3d67f74a1e20b4aba76
|
# -*- coding: utf-8 -*-
import os
import os.path as osp
import sys
import argparse
import pickle
import six
import glob
from itertools import chain
from collections import defaultdict
import yaml
import xarray as xr
import psyplot
from psyplot.docstring import docstrings
from psyplot.warning import warn
from psyplot.compat.pycompat import get_default_value
from funcargparse import FuncArgParser
import logging
rcParams = psyplot.rcParams
logger = logging.getLogger(__name__)
def main(args=None):
"""Main function for usage of psyplot from the command line
This function creates a parser that parses command lines to the
:func:`make_plot` functions or (if the ``psyplot_gui`` module is
present, to the :func:`psyplot_gui.start_app` function)
Returns
-------
psyplot.parser.FuncArgParser
The parser that has been used from the command line"""
try:
from psyplot_gui import get_parser as _get_parser
except ImportError:
logger.debug('Failed to import gui', exc_info=True)
parser = get_parser(create=False)
parser.update_arg('output', required=True)
parser.create_arguments()
parser.parse2func(args)
else:
parser = _get_parser(create=False)
parser.create_arguments()
parser.parse_known2func(args)
@docstrings.get_sections(base='make_plot')
@docstrings.dedent
def make_plot(fnames=[], name=[], dims=None, plot_method=None,
output=None, project=None, engine=None, formatoptions=None,
tight=False, rc_file=None, encoding=None, enable_post=False,
seaborn_style=None, output_project=None,
concat_dim=get_default_value(xr.open_mfdataset, 'concat_dim'),
chname={}, preset=None):
"""
Eventually start the QApplication or only make a plot
Parameters
----------
fnames: list of str
Either the filenames to show, or, if the `project` parameter is set,
a list of `,`-separated filenames to make a mapping from the
original filename to a new one
name: list of str
The variable names to plot if the `output` parameter is set
dims: dict
A mapping from coordinate names to integers if the `project` is not
given
plot_method: str
The name of the plot_method to use
output: str or list of str
If set, the data is loaded and the figures are saved to the specified
filename and no graphical user interface is shown
project: str
If set, the project located at the given file name is loaded
engine: str
The engine to use for opening the dataset (see
:func:`psyplot.data.open_dataset`)
formatoptions: dict
A dictionary of formatoption that is applied to the data visualized by
the chosen `plot_method`
tight: bool
If True/set, the tight bounding box of the figure is determined and the
paper size of the `output` is adjusted to it
rc_file: str
The path to a yaml configuration file that can be used to update the
:attr:`~psyplot.config.rcsetup.rcParams`
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
Enable the :attr:`~psyplot.plotter.Plotter.post` processing
formatoption. If True/set, post processing scripts are enabled in the
given `project`. Only set this if you are sure that you can trust the
given project file because it may be a security vulnerability.
seaborn_style: str
The name of the style of the seaborn package that can be used for
the :func:`seaborn.set_style` function
output_project: str
The name of a project file to save the project to
concat_dim: str
The concatenation dimension if multiple files in `fnames` are
provided
chname: dict
A mapping from variable names in the project to variable names in the
datasets that should be used instead
preset: str
The filename or identifier of a preset. If the given `preset` is
the path to an existing yaml file, it will be loaded. Otherwise we
look up the `preset` in the psyplot configuration directory (see
:func:`~psyplot.config.rcsetup.get_configdir`).
"""
if project is not None and (name != [] or dims is not None):
warn('The `name` and `dims` parameters are ignored if the `project`'
' parameter is set!')
if rc_file is not None:
rcParams.load_from_file(rc_file)
if dims is not None and not isinstance(dims, dict):
dims = dict(chain(*map(six.iteritems, dims)))
if len(output) == 1:
output = output[0]
if not fnames and not project:
raise ValueError(
"Either a filename or a project file must be provided if "
"the output parameter is set!")
elif project is None and plot_method is None:
raise ValueError(
"A plotting method must be provided if the output parameter "
"is set and not the project!")
if seaborn_style is not None:
import seaborn as sns
sns.set_style(seaborn_style)
import psyplot.project as psy
if project is not None:
fnames = [s.split(',') for s in fnames]
chname = dict(chname)
single_files = (l[0] for l in fnames if len(l) == 1)
alternative_paths = defaultdict(lambda: next(single_files, None))
alternative_paths.update([l for l in fnames if len(l) == 2])
p = psy.Project.load_project(
project, alternative_paths=alternative_paths,
engine=engine, encoding=encoding, enable_post=enable_post,
chname=chname)
if preset:
p.load_preset(preset)
if formatoptions is not None:
p.update(fmt=formatoptions)
p.export(output, tight=tight)
else:
pm = getattr(psy.plot, plot_method, None)
if pm is None:
raise ValueError("Unknown plot method %s!" % plot_method)
kwargs = {'name': name} if name else {}
p = pm(
fnames, dims=dims or {}, engine=engine, preset=preset,
fmt=formatoptions or {}, mf_mode=True, concat_dim=concat_dim,
**kwargs)
p.export(output, tight=tight)
if output_project is not None:
p.save_project(output_project)
return
def get_parser(create=True):
"""Return a parser to make that can be used to make plots or open files
from the command line
Returns
-------
psyplot.parser.FuncArgParser
The :class:`argparse.ArgumentParser` instance"""
#: The parser that is used to parse arguments from the command line
epilog = docstrings.get_sections(docstrings.dedent("""
Examples
--------
Here are some examples on how to use psyplot from the command line.
Plot the variable ``'t2m'`` in a netCDF file ``'myfile.nc'`` and save
the plot to ``'plot.pdf'``::
$ psyplot myfile.nc -n t2m -pm mapplot -o test.pdf
Create two plots for ``'t2m'`` with the first and second timestep on
the second vertical level::
$ psyplot myfile.nc -n t2m -pm mapplot -o test.pdf -d t,0,1 z,1
If you have saved a project using the
:meth:`psyplot.project.Project.save_project` method into a file named
``'project.pkl'``, you can replot this via::
$ psyplot -p project.pkl -o test.pdf
If you use a different dataset than the one you used in the project
(e.g. ``'other_ds.nc'``), you can replace it via::
$ psyplot other_dataset.nc -p project.pkl -o test.pdf
or explicitly via::
$ psyplot old_ds.nc,other_ds.nc -p project.pkl -o test.pdf
You can also load formatoptions from a configuration file, e.g.::
$ echo 'title: my title' > fmt.yaml
$ psyplot myfile.nc -n t2m -pm mapplot -fmt fmt.yaml -o test.pdf
"""), 'parser', ['Examples'])
if _on_rtd: # make a rubric examples section
epilog = '.. rubric:: Examples\n' + '\n'.join(epilog.splitlines()[2:])
parser = FuncArgParser(
description="""
Load a dataset, make the plot and save the result to a file""",
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
info_grp = parser.add_argument_group(
'Info options',
'Options that print information and quit afterwards')
parser.update_arg('version', short='V', long='version', action='version',
version=psyplot.__version__, if_existent=False,
group=info_grp)
parser.update_arg('all_versions', short='aV', long='all-versions',
action=AllVersionsAction, if_existent=False,
group=info_grp)
parser.update_arg('list_plugins', short='lp', long='list-plugins',
action=ListPluginsAction, if_existent=False,
group=info_grp)
parser.update_arg(
'list_plot_methods', short='lpm', long='list-plot-methods',
action=ListPlotMethodsAction, if_existent=False, group=info_grp)
parser.update_arg(
'list_datasets', short='lds', long='list-datasets',
action=ListDsNamesAction, if_existent=False, group=info_grp,
help="""List the used dataset names in the given `project`.""")
parser.update_arg(
'list_presets', short='lps', long='list-presets',
action=ListPresetsAction, if_existent=False, group=info_grp)
parser.setup_args(make_plot)
output_grp = parser.add_argument_group(
'Output options',
'Options that only have an effect if the `-o` option is set.')
parser.update_arg('fnames', positional=True, nargs='*')
parser.update_arg('name', short='n', nargs='*', metavar='variable_name',
const=None)
parser.update_arg('dims', short='d', nargs='+', type=_load_dims,
metavar='dim,val1[,val2[,...]]')
pm_choices = {pm for pm, d in filter(
lambda t: t[1].get('plot_func', True),
six.iteritems(rcParams['project.plotters']))}
if psyplot._project_imported:
import psyplot.project as psy
pm_choices.update(set(psy.plot._plot_methods))
parser.update_arg('plot_method', short='pm', choices=pm_choices,
metavar='{%s}' % ', '.join(map(repr, pm_choices)))
parser.update_arg('output', short='o', group=output_grp)
parser.update_arg('output_project', short='op', group=output_grp)
parser.update_arg('project', short='p')
parser.update_arg(
'formatoptions', short='fmt', type=_load_dict, help="""
The path to a yaml (``'.yml'`` or ``'.yaml'``) or pickle file
defining a dictionary of formatoption that is applied to the data
visualized by the chosen `plot_method`""", metavar='FILENAME')
parser.update_arg(
'chname', type=lambda s: s.split(','), nargs='*', help="""
A mapping from variable names in the project to variable names in the
datasets that should be used instead. Variable names should be
separated by a comma.""", metavar='project-variable,variable-to-use')
parser.update_arg('tight', short='t', group=output_grp)
parser.update_arg('rc_file', short='rc')
parser.pop_key('rc_file', 'metavar')
parser.update_arg('encoding', short='e')
parser.pop_key('enable_post', 'short')
parser.update_arg('seaborn_style', short='sns')
parser.update_arg('concat_dim', short='cd')
if create:
parser.create_arguments()
return parser
def _load_dict(fname):
with open(fname) as f:
if fname.endswith('.yml') or fname.endswith('.yaml'):
return yaml.load(f, Loader=yaml.SafeLoader)
return pickle.load(f)
def _load_dims(s):
s = s.split(',')
if len(s) > 1:
return {s[0]: list(map(int, s[1:]))}
return {}
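# --- Editor's note: illustrative examples, not part of the original module ---
# `_load_dims` parses the `-d dim,val1[,val2[,...]]` command line syntax:
#
#   >>> _load_dims('t,0,1')
#   {'t': [0, 1]}
#   >>> _load_dims('z')
#   {}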
#: Disable the default for the info actions on RTD, because it looks
#: better in the docs
_on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
class AllVersionsAction(argparse.Action):
def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None,
default=argparse.SUPPRESS, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
kwargs['help'] = ("Print the versions of all plugins and requirements "
"and exit")
if not _on_rtd:
kwargs['default'] = default
super(AllVersionsAction, self).__init__(
option_strings, nargs=0, dest=dest,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print(yaml.dump(psyplot.get_versions(), default_flow_style=False))
sys.exit(0)
class ListPresetsAction(argparse.Action):
def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None,
default=argparse.SUPPRESS, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
kwargs['help'] = ("Print available presets and exit")
if not _on_rtd:
kwargs['default'] = default
super().__init__(option_strings, nargs=0, dest=dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
from psyplot.config.rcsetup import get_configdir
presets_dir = osp.join(get_configdir(), 'presets')
if not osp.exists(presets_dir):
sys.exit(0)
else:
presets = {osp.splitext(osp.basename(fname))[0]: fname
for fname in glob.glob(osp.join(presets_dir, '*.yml'))}
print('\n'.join(map(': '.join, presets.items())))
sys.exit(0)
class ListPluginsAction(argparse.Action):
def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None,
default=argparse.SUPPRESS, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
kwargs['help'] = ("Print the names of the plugins and exit")
if not _on_rtd:
kwargs['default'] = default
super(ListPluginsAction, self).__init__(
option_strings, nargs=0, dest=dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print(yaml.dump(psyplot.rcParams._plugins, default_flow_style=False))
sys.exit(0)
class ListPlotMethodsAction(argparse.Action):
def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None,
default=argparse.SUPPRESS, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
kwargs['help'] = "List the available plot methods and what they do"
if not _on_rtd:
kwargs['default'] = default
super(ListPlotMethodsAction, self).__init__(
option_strings, nargs=0, dest=dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
pm_choices = {}
for pm, d in filter(lambda t: t[1].get('plot_func', True),
six.iteritems(rcParams['project.plotters'])):
pm_choices[pm] = d.get('summary') or (
'Open and plot data via :class:`%s.%s` plotters' % (
d['module'], d['plotter_name']))
if psyplot._project_imported:
import psyplot.project as psy
pm_choices.update(psy.plot._plot_methods)
print(yaml.dump(pm_choices, default_flow_style=False))
sys.exit(0)
class ListDsNamesAction(argparse.Action):
"""An action to list the used file names in a project"""
def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None,
default=argparse.SUPPRESS, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
if not _on_rtd:
kwargs['default'] = default
super(ListDsNamesAction, self).__init__(
option_strings, nargs=0, dest=dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if namespace.project is None:
print('A project is required before this argument! Call syntax:\n'
'%s -p <project-file>.pkl %s' % (parser.prog, option_string))
sys.exit(1)
import psyplot.data as psyd
import pickle
with open(namespace.project, 'rb') as f:
d = pickle.load(f)['arrays']
names = list(filter(None, (
t[0] for t in psyd.ArrayList._get_dsnames(d))))
if names:
print(yaml.dump(names, default_flow_style=False))
sys.exit(0)
if __name__ == '__main__':
main()
|
Chilipp/psyplot
|
psyplot/__main__.py
|
Python
|
gpl-2.0
| 16,946
|
[
"NetCDF"
] |
dca280fc9e67d06502659636ec07ce1bfbf205881303654930026d9769a757eb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
spectro14: FAME-UHD (ESRF/BM16) X-ray emission spectrometer
===========================================================
"""
import math
import numpy as np
from sloth.utils.bragg import (
HC,
SI_ALAT,
GE_ALAT,
d_cubic,
get_dspacing,
kev2wlen,
wlen2kev,
kev2ang,
ang2kev,
)
def calc_det_dzh(theta):
"""Calculate detector vertical offset of the top raw
.. note:: formula taken from Table 2 on pag. 68 of CE document
vol. 3 (Olivier Proux et al.), theta in deg
"""
return 919.49 - 27.018 * theta + 0.26209 * theta ** 2 - 0.00083803 * theta ** 3
def calc_det_dzb(theta):
"""Calculate detector vertical offset of the bottom raw
.. note:: formula taken from Table 2 on pag. 68 of CE document
vol. 3 (Olivier Proux et al.), theta in deg
"""
return -677.96 + 19.121 * theta - 0.17315 * theta ** 2 + 0.00049335 * theta ** 3
def calc_det_dz(theta):
"""Calc detector vertical offset from focus both rows"""
return abs(calc_det_dzh(theta)) + abs(calc_det_dzb(theta))
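# Hedged sanity check (75 deg is an assumed Bragg angle chosen purely for
# illustration): the total offset is the sum of the absolute top- and
# bottom-row offsets computed above.
def _demo_det_dz(theta=75.0):
    dzh = calc_det_dzh(theta)  # top-row offset [mm]
    dzb = calc_det_dzb(theta)  # bottom-row offset [mm]
    assert abs(calc_det_dz(theta) - (abs(dzh) + abs(dzb))) < 1e-9
    return dzh, dzb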
def calc_pos_com(emi, d=0, r=0, dz=0, sz=0):
"""Get spectrometer positions for common axes
Parameters
----------
emi : emission energy [keV]
d : analyser d-spacing [nm]
r : crystal bending radius (=diameter Rowland circle) [mm]
dz : offset in z from the central row [mm]
sz : offset in z of the sample from top table [mm]
Returns
-------
dictionary with real motors positions
{
"xs" : float, #horizontal position central row [mm]
"zeq" : float, #vertical position central row [mm]
"zd" : float, #vertical position detector [mm]
"xh" : float, #hor correction top row [mm]
"xb" : float, #hor correction bottom row [mm]
"thetah" : float, #theta correction top row [deg]
"thetab" : float #theta correction bottom row [deg]
}
"""
_rtheta = kev2ang(emi, d, deg=False)
_xs = r * math.sin(_rtheta) * math.sin(_rtheta)
_zeq = r * math.sin(_rtheta) * math.cos(_rtheta)
_zd = 2 * _zeq # detector
_rsth = r * math.sin(_rtheta)
try:
_xh = math.sqrt(_rsth ** 2 - (_zeq + dz) ** 2) - _xs
_xb = math.sqrt(_rsth ** 2 - (_zeq - dz) ** 2) - _xs
_thetah = math.degrees(
math.atan((_zeq + dz) / (_xh + _xs)) + _rtheta - (math.pi / 2.0)
)
_thetab = math.degrees(
math.atan((_zeq - dz) / (_xb + _xs)) + _rtheta - (math.pi / 2.0)
)
except Exception:
_xh = 0.0
_xb = 0.0
_thetah = 0.0
_thetab = 0.0
_com_dict = {
"xs": _xs,
"zeq": _zeq + sz,
"zd": _zd + sz,
"xh": _xh,
"xb": _xb,
"thetah": _thetah,
"thetab": _thetab,
}
return _com_dict
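# Hedged usage sketch: the emission energy (10 keV) and the geometry values
# below are illustrative assumptions only, mirroring the __main__ test at the
# bottom of this file rather than a documented beamline configuration.
def _demo_pos_com():
    si555 = d_cubic(SI_ALAT, (5, 5, 5))
    return calc_pos_com(10.0, d=si555, r=1000.0, dz=82.0, sz=500.0)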
def calc_pos_mod(nmodule):
"""get positions per module
"""
pass
def show_spectro_overview(theta, d=None, r=1000.0, dz=82.0, sz=500.0, retdict=False):
"""show an overview of the spectrometer geometry calculations"""
if d is not None:
ene = ang2kev(theta, d)
else:
ene = "no dspacing"
rtheta = math.radians(theta)
p = r * math.sin(rtheta)
xs = r * math.sin(rtheta) ** 2
zeq = r * math.sin(rtheta) * math.cos(rtheta)
xsh = math.sqrt(p ** 2 - (zeq + dz) ** 2)
xsb = math.sqrt(p ** 2 - (zeq - dz) ** 2)
xh = xsh - xs
xb = xsb - xs
rth = math.acos(xsh / p) + rtheta - math.pi / 2.0
rtb = math.acos(xsb / p) + rtheta - math.pi / 2.0
th = math.degrees(rth)
tb = math.degrees(rtb)
#: using SolidWorks model v1804 (TODO: check!!!)
xdb = 2 * zeq * math.sin(abs(rtb))
zdb = xdb / math.tan(rtheta + rtb)
dzh = calc_det_dzh(theta)
dzb = calc_det_dzb(theta)
dxh = dzh / math.tan(math.pi / 2.0 - rtheta)
dxb = dzb / math.tan(math.pi / 2.0 - rtheta)
# ADD SAMPLE Z OFFSET
zd = zeq * 2 + sz
zeq += sz
outdict = {
"theta": theta,
"ene": ene,
"r": r,
"dz": dz,
"sz": sz,
"p": p,
"xs": xs,
"zeq": zeq,
"zd": zd,
"xh": xh,
"th": th,
"xb": xb,
"tb": tb,
"dzh": dzh,
"dzb": dzb,
"dxh": dxh,
"dxb": dxb,
"xdb": xdb,
"zdb": zdb,
"str_h": " top ",
"str_b": " bottom ",
"str_det": " DETECTOR ",
"str_ana": " CRYSTALS TABLE ",
"nl": "\n",
}
outstr = "\
============================{nl}\
SPECTRO14 POSITIONS OVERVIEW{nl}\
============================{nl}\
CONFIG: theta = {theta:>10.4f} (ene = {ene}), r = {r:>10.3f}, dz = {dz:>10.3f}, sz = {sz:>10.3f}{nl}\
UNITS: deg, eV, mm{nl}\
{str_ana:=^40}{nl}\
p = {p:>10.3f}{nl}\
xs = {xs:>10.3f}{nl}\
zeq = {zeq:>10.3f}{nl}\
zd = {zd:>10.3f}{nl}\
{str_h:=^16} | {str_b:=^16}{nl}\
xh = {xh:>10.3f} | xb = {xb:>10.3f}{nl}\
th = {th:>10.3f} | tb = {tb:>10.3f}{nl}\
{str_det:=^40}{nl}\
{str_h:=^16} | {str_b:=^16}{nl}\
dzh = {dzh:>10.3f} | dzb = {dzb:>10.3f}{nl}\
dxh = {dxh:>10.3f} | dxb = {dxb:>10.3f}{nl}\
(=== SW 1804: absolute detector offsets ===){nl}\
(dyb = {xdb:>10.3f}){nl}\
(dzb = {zdb:>10.3f}){nl}\
"
if retdict:
return outdict
else:
print(outstr.format(**outdict))
# FOR TESTS #
if __name__ == "__main__":
si555 = d_cubic(SI_ALAT, (5, 5, 5))
show_spectro_overview(80, d=si555)
pass
|
maurov/xraysloth
|
sloth/inst/spectro14.py
|
Python
|
bsd-3-clause
| 5,511
|
[
"CRYSTAL"
] |
1abf29ca288334e465725f5cb37959ef7b08779cd453dfb7618dec5488b5fbe9
|
# Original code: Copyright 2014 The University of Melbourne
# Copyright 2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
"""Test all pages render (without exceptions)."""
from __future__ import print_function, unicode_literals
import six
import re
import unittest
from django.conf import settings
from django.contrib.admindocs.views import simplify_regex
from django.test import TestCase
from django.utils.text import slugify
from django.utils.encoding import smart_text
from django_extensions.management.commands.show_urls import \
extract_views_from_urlpatterns
urlconf = __import__(settings.ROOT_URLCONF, {}, {}, [''])
def make_test_get_function(name, url, url_pattern):
def test_get(self):
self.assertEqual(
self.client.login(username='kgsuper', password='aq12ws'),
True,
'Login failed.',
)
resp = self.client.get(url, follow=True)
self.assertIn(
resp.status_code,
[200, 400, 403],
'HTTP Error {}: {} > {}'.format(
resp.status_code,
url_pattern,
url,
),
)
test_get.__name__ = str(name)
return test_get
class TestAllPagesMeta(type):
@classmethod
def _add_test_methods(mcs, attrs, urlpatterns):
# loop through every URL pattern
for index, (func, regex, url_name) in enumerate(
extract_views_from_urlpatterns(urlpatterns)):
if func.__module__.startswith("%s." % attrs['module']):
pass
elif func.__module__ == attrs['module']:
pass
else:
continue
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, '__class__'):
func_name = '%s()' % func.__class__.__name__
else:
func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
url_pattern = smart_text(simplify_regex(regex))
name = '_'.join(
[
'test',
func.__module__.replace('.', '_'),
slugify('%s' % func_name),
] + slugify(
url_pattern.replace('/', '_') or 'root'
).replace('_', ' ').split(),
)
url = url_pattern
for key, value in attrs['variables'].items():
url = url.replace('<%s>' % key, value)
# bail out if we don't know how to visit this URL properly
testfunc = unittest.skipIf(
any(
re.search(stop_pattern, url)
for stop_pattern
in [
r'<.*>',
]
),
'URL pattern %r contains stop pattern.' % url,
)(
make_test_get_function(name, url, url_pattern),
)
attrs[name] = testfunc
def __new__(mcs, name, parents, attrs):
if parents != (TestCase,):
mcs._add_test_methods(attrs, urlconf.urlpatterns)
return super(TestAllPagesMeta, mcs).__new__(mcs, name, parents, attrs)
@six.add_metaclass(TestAllPagesMeta)
class TestAllPagesCase(TestCase):
pass
|
monash-merc/karaage
|
karaage/tests/client.py
|
Python
|
gpl-3.0
| 3,908
|
[
"VisIt"
] |
6c03a79b01c09e2036cd22c8d35f6c69f1b14a2fc328716de080d334851a1d88
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for functions and classes specific to reciprocal space
calculations.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from .utils import verbosedict
from collections import namedtuple
import time
try:
from pyFAI import geometry as geo
except ImportError:
geo = None
import logging
logger = logging.getLogger(__name__)
def process_to_q(setting_angles, detector_size, pixel_size,
calibrated_center, dist_sample, wavelength, ub,
frame_mode=None):
"""
This will compute the hkl values for all pixels in a shape specified by
detector_size.
Parameters
----------
setting_angles : ndarray
six angles of all the images - Required shape is [num_images][6] and
required type is something that can be cast to a 2D numpy array
Angle order: delta, theta, chi, phi, mu, gamma (degrees)
detector_size : tuple
2 element tuple defining the number of pixels in the detector. Order is
(num_columns, num_rows)
pixel_size : tuple
2 element tuple defining the size of each pixel in mm. Order is
(column_pixel_size, row_pixel_size). If not in mm, must be in the same
units as `dist_sample`
calibrated_center : tuple
2 element tuple defining the center of the detector in pixels. Order
is (column_center, row_center)(x y)
dist_sample : float
distance from the sample to the detector (mm). If not in mm, must be
in the same units as `pixel_size`
wavelength : float
wavelength of incident radiation (Angstroms)
ub : ndarray
UB matrix (orientation matrix) 3x3 matrix
frame_mode : str, optional
Frame mode defines the data collection mode and thus the desired
output from this function. Defaults to hkl mode (frame_mode=4)
'theta' : Theta axis frame.
'phi' : Phi axis frame.
'cart' : Crystal cartesian frame.
'hkl' : Reciprocal lattice units frame.
See the `process_to_q.frame_mode` attribute for an exact list of
valid options.
Returns
-------
hkl : ndarray
(Qx, Qy, Qz) - HKL values
shape is [num_images * num_rows * num_columns][3]
Notes
-----
Six angles of an image: (delta, theta, chi, phi, mu, gamma )
These axes are defined according to the following references.
References: text [1]_, text [2]_
.. [1] M. Lohmeier and E.Vlieg, "Angle calculations for a six-circle
surface x-ray diffractometer," J. Appl. Cryst., vol 26, pp 706-716,
1993.
.. [2] E. Vlieg, "A (2+3)-Type surface diffractometer: Mergence of the
z-axis and (2+2)-Type geometries," J. Appl. Cryst., vol 31, pp 198-203,
1998.
"""
try:
from ..ext import ctrans
except ImportError:
raise NotImplementedError(
"ctrans is not available on your platform. See"
"https://github.com/scikit-beam/scikit-beam/issues/418"
"to follow updates to this problem.")
# Set default threads
# set default frame_mode
if frame_mode is None:
frame_mode = 4
else:
str_to_int = verbosedict((k, j + 1) for j, k
in enumerate(process_to_q.frame_mode))
frame_mode = str_to_int[frame_mode]
# ensure the ub matrix is an array
ub = np.asarray(ub)
# ensure setting angles is a 2-D
setting_angles = np.atleast_2d(setting_angles)
if setting_angles.ndim != 2:
raise ValueError('setting_angles is expected to be a 2-D array with'
' dimensions [num_images][num_angles]. You provided '
'an array with dimensions {0}'
''.format(setting_angles.shape))
if setting_angles.shape[1] != 6:
raise ValueError('It is expected that there should be six angles in '
'the setting_angles parameter. You provided {0}'
' angles.'.format(setting_angles.shape[1]))
# *********** Converting to Q **************
# starting time for the process
t1 = time.time()
# ctrans - c routines for fast data analysis
hkl = ctrans.ccdToQ(angles=setting_angles * np.pi / 180.0,
mode=frame_mode,
ccd_size=(detector_size),
ccd_pixsize=(pixel_size),
ccd_cen=(calibrated_center),
dist=dist_sample,
wavelength=wavelength,
UBinv=np.matrix(ub).I)
# ending time for the process
t2 = time.time()
logger.info("Processing time for {0} {1} x {2} images took {3} seconds."
"".format(setting_angles.shape[0], detector_size[0],
detector_size[1], (t2 - t1)))
return hkl
# Assign frame_mode as an attribute to the process_to_q function so that the
# autowrapping knows what the valid options are
process_to_q.frame_mode = ['theta', 'phi', 'cart', 'hkl']
def hkl_to_q(hkl_arr):
"""
    This function computes the reciprocal-space (q) values from a known HKL
    array for each pixel of the detector, for all the images.
Parameters
----------
hkl_arr : ndarray
(Qx, Qy, Qz) - HKL array
shape is [num_images * num_rows * num_columns][3]
Returns
-------
q_val : ndarray
Reciprocal values for each pixel for all images
shape is [num_images * num_rows * num_columns]
"""
return np.linalg.norm(hkl_arr, axis=1)
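# Hedged example with a toy HKL array (illustration only): hkl_to_q is a plain
# row-wise Euclidean norm.
def _demo_hkl_to_q():
    hkl = np.array([[1., 0., 0.], [0., 3., 4.]])
    q = hkl_to_q(hkl)
    assert np.allclose(q, [1., 5.])
    return q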
def calibrated_pixels_to_q(detector_size, pyfai_kwargs):
"""
For a given detector and pyfai calibrated geometry give back the q value
for each pixel in the detector.
Parameters
-----------
detector_size : tuple
2 element tuple defining the number of pixels in the detector. Order is
(num_columns, num_rows)
pyfai_kwargs: dict
The dictionary of pyfai geometry kwargs, given by pyFAI's calibration
Ex: dist, poni1, poni2, rot1, rot2, rot3, splineFile, wavelength,
detector, pixel1, pixel2
Returns
-------
q_val : ndarray
Reciprocal values for each pixel shape is [num_rows * num_columns]
"""
if geo is None:
raise RuntimeError("You must have pyFAI installed to use this "
"function.")
a = geo.Geometry(**pyfai_kwargs)
return a.qArray(detector_size)
gisaxs_output = namedtuple(
'gisaxs_output',
['alpha_i', 'theta_f',
'alpha_f', 'tilt_angle',
'qx', 'qy', 'qz', 'qr']
)
def gisaxs(incident_beam, reflected_beam, pixel_size, detector_size,
dist_sample, wavelength, theta_i=0.0):
"""
This function will provide scattering wave vector(q) components(x, y, z),
q parallel and incident and reflected angles for grazing-incidence small
angle X-ray scattering (GISAXS) geometry.
Parameters
----------
incident_beam : tuple
x and y co-ordinates of the incident beam in pixels
reflected_beam : tuple
x and y co-ordinates of the reflected beam in pixels
pixel_size : tuple
pixel_size in um
detector_size: tuple
2 element tuple defining no. of pixels(size) in the
detector X and Y direction
dist_sample : float
sample to detector distance, in meters
wavelength : float
wavelength of the x-ray beam in Angstroms
theta_i : float, optional
out of plane angle, default 0.0
Returns
-------
namedtuple
`gisaxs_output` object is returned
This `gisaxs_output` object contains, in this order:
- alpha_i : float
incident angle
- theta_f : array
out of plane angle
shape (detector_size[0], detector_size[1])
- alpha_f : array
exit angle
shape (detector_size[0], detector_size[1])
- tilt_angle : float
tilt angle
- qx : array
x component of the scattering wave vector
shape (detector_size[0], detector_size[1])
- qy : array
y component of the scattering wave vector
shape (detector_size[0], detector_size[1])
- qz : array
z component of the scattering wave vector
shape (detector_size[0], detector_size[1])
- qr : array
q parallel component
shape (detector_size[0], detector_size[1])
Notes
-----
This implementation is based on published work. [1]_
References
----------
.. [1] R. Lazzari, "IsGISAXS: a program for grazing-incidence small-
angle X-ray scattering analysis of supported islands," J. Appl.
Cryst., vol 35, p 406-421, 2002.
"""
inc_x, inc_y = incident_beam
refl_x, refl_y = reflected_beam
# convert pixel_size to meters
pixel_size = np.asarray(pixel_size) * 10 ** (-6)
# tilt angle
tilt_angle = np.arctan2((refl_x - inc_x) * pixel_size[0],
(refl_y - inc_y) * pixel_size[1])
# incident angle
alpha_i = np.arctan2((refl_y - inc_y) * pixel_size[1],
dist_sample) / 2.
y, x = np.indices(detector_size)
# exit angle
alpha_f = np.arctan2((y - inc_y) * pixel_size[1],
dist_sample) - alpha_i
# out of plane angle
two_theta = np.arctan2((x - inc_x) * pixel_size[0],
dist_sample)
theta_f = two_theta / 2 - theta_i
# wave number
wave_number = 2*np.pi/wavelength
# x component
qx = (np.cos(alpha_f) * np.cos(2*theta_f) -
np.cos(alpha_i) * np.cos(2*theta_i)) * wave_number
# y component
# the variables post-fixed with an underscore are intermediate steps
qy_ = (np.cos(alpha_f) * np.sin(2*theta_f) -
np.cos(alpha_i) * np.sin(2*theta_i))
qz_ = np.sin(alpha_f) + np.sin(alpha_i)
qy = (qz_ * np.sin(tilt_angle) + qy_ * np.cos(tilt_angle)) * wave_number
# z component
qz = (qz_ * np.cos(tilt_angle) - qy_ * np.sin(tilt_angle)) * wave_number
# q parallel
qr = np.sqrt(qx**2 + qy**2)
return gisaxs_output(alpha_i, theta_f, alpha_f, tilt_angle, qx, qy, qz, qr)
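# Hedged usage sketch: every number below (beam positions in pixels, 75 um
# pixels, a 256x256 detector, 2 m sample distance, 1 Angstrom wavelength) is a
# made-up illustration, not a real instrument configuration.
def _demo_gisaxs():
    out = gisaxs(incident_beam=(100, 120), reflected_beam=(100, 180),
                 pixel_size=(75, 75), detector_size=(256, 256),
                 dist_sample=2.0, wavelength=1.0)
    return out.qr.shape  # one q-parallel value per detector pixel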
|
licode/scikit-xray
|
skbeam/core/recip.py
|
Python
|
bsd-3-clause
| 12,782
|
[
"CRYSTAL"
] |
ed317cf8d5729f23921dc35856f36f01e5a3c452eb70bccb5af9448f94cf7e58
|
from theory import *
from galaxy import *
from orbitreducer import *
from pyoglue import *
|
adrn/GalaxySynth
|
synthetic/__init__.py
|
Python
|
mit
| 91
|
[
"Galaxy"
] |
8a32d6c3c078623e3d468294cf093bbb9c474288e03d458f106574f0d2b4e728
|
"""
Main module from which the application is started and the web interface is mounted.
To start the application directly using the python web server, you can just do
::
python web.py
Refer to the server installation documentation for more details on how to deploy in production.
"""
from octopus.core import app, initialise, add_configuration
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true", help="pycharm debug support enable")
parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
args = parser.parse_args()
if args.config:
add_configuration(app, args.config)
pycharm_debug = app.config.get('DEBUG_PYCHARM', False)
if args.debug:
pycharm_debug = True
if pycharm_debug:
app.config['DEBUG'] = False
import pydevd
pydevd.settrace(app.config.get('DEBUG_SERVER_HOST', 'localhost'), port=app.config.get('DEBUG_SERVER_PORT', 51234), stdoutToServer=True, stderrToServer=True)
print "STARTED IN REMOTE DEBUG MODE"
initialise()
# most of the imports should be done here, after initialise()
from service.view.oaipmh import blueprint as oai
app.register_blueprint(oai)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=app.config['DEBUG'], port=app.config['PORT'], threaded=False)
|
JiscPER/jper-oaipmh
|
service/web.py
|
Python
|
apache-2.0
| 1,408
|
[
"Octopus"
] |
fbbbf91d23c6805d1ba6b4477a2e060a632a7b14757aed201fd0dae97cf9348e
|
## \file
## \ingroup tutorial_roofit
## \notebook -nodraw
## Data and categories: latex printing of lists and sets of RooArgSets
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Setup composite pdf
# --------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean1,sigma) and g2(x,mean2,sigma) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Sum the signal components into a composite signal pdf
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Build Chebychev polynomial pdf
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg1 = ROOT.RooChebychev("bkg1", "Background 1",
x, ROOT.RooArgList(a0, a1))
# Build exponential pdf
alpha = ROOT.RooRealVar("alpha", "alpha", -1)
bkg2 = ROOT.RooExponential("bkg2", "Background 2", x, alpha)
# Sum the background components into a composite background pdf
bkg1frac = ROOT.RooRealVar(
    "bkg1frac", "fraction of component 1 in background", 0.2, 0., 1.)
bkg = ROOT.RooAddPdf(
    "bkg", "Background", ROOT.RooArgList(bkg1, bkg2), ROOT.RooArgList(bkg1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model", "g1+g2+a", ROOT.RooArgList(bkg, sig), ROOT.RooArgList(bkgfrac))
# Make list of parameters before and after fit
# ----------------------------------------------------------------------------------------
# Make list of model parameters
params = model.getParameters(ROOT.RooArgSet(x))
# Save snapshot of prefit parameters
initParams = params.snapshot()
# Do fit to data, obtain error estimates on parameters
data = model.generate(ROOT.RooArgSet(x), 1000)
model.fitTo(data)
# Print LateX table of parameters of pdf
# --------------------------------------------------------------------------
# Print parameter list in LaTeX for (one column with names, column with
# values)
params.printLatex()
# Print parameter list in LaTeX for (names values|names values)
params.printLatex(ROOT.RooFit.Columns(2))
# Print two parameter lists side by side (name values initvalues)
params.printLatex(ROOT.RooFit.Sibling(initParams))
# Print two parameter lists side by side (name values initvalues|name
# values initvalues)
params.printLatex(ROOT.RooFit.Sibling(initParams), ROOT.RooFit.Columns(2))
# Write LaTex table to file
params.printLatex(ROOT.RooFit.Sibling(initParams),
ROOT.RooFit.OutputFile("rf407_latextables.tex"))
|
root-mirror/root
|
tutorials/roofit/rf407_latextables.py
|
Python
|
lgpl-2.1
| 3,044
|
[
"Gaussian"
] |
7fccf783110968907c67d43c8e851b28961a6f18170817b4f15cfc1783aca3a0
|
#coding:utf-8
import os, sys
import timeit
import numpy, math
import scipy.spatial.distance as sp
import common_functions
################################################
# Parameters
################################################
# define the default parameters
train = "DR1"
test = "DR2"
lesions = ["exsudato-duro","hemorragia-superficial","hemorragia-profunda","lesoes-vermelhas","mancha-algodonosa","drusas-maculares"]
techniquesLow = ["sparse","dense"]
techniquesMid = ["hard","semi","soft"]
image = ""
# ShowOptions function
def showOptions():
print "-h : show options"
print "-train dataset : define the training dataset (default DR1)\n\tDR1 -- DR1 as the training dataset\n\tDR2 -- DR2 as the training dataset"
print "-test dataset : define test dataset (default DR2)\n\tDR1 -- DR1 as the test dataset\n\tDR2 -- DR2 as the test dataset"
print "-l lesion : define a specific DR-related lesion (default [exsudato-duro, hemorragia-superficial, hemorragia-profunda, lesoes-vermelhas, mancha-algodonosa, drusas-maculares)\n\texsudato-duro\t\t -- Hard Exudates\n\themorragia-superficial\t -- Superficial Hemorrhages\n\themorragia-profunda\t -- Deep Hemorrhages\n\tlesoes-vermelhas\t -- Red Lesions\n\tmancha-algodonosa\t -- Cotton-wool Spots\n\tdrusas-maculares\t -- Drusen"
print "-low technique : define a specific low-level technique (default [sparse, dense])\n\tsparse -- Sparse low-level feature extraction\n\tdense -- Dense low-level feature extraction"
print "-mid technique : define a specific mid-level technique (default [hard, semi, soft])\n\thard -- Hard-Sum coding/pooling\n\tsemi -- Semi-Soft-Sum coding/pooling\n\tsoft -- Soft-Max coding/pooling"
print "-i image : define the image name (used only for cases where we are interested in describing only one image)"
quit()
# take the parameters
if len(sys.argv) > 1:
for i in range(1, len(sys.argv),2):
op = sys.argv[i]
if op == "-h": showOptions()
elif op == "-train": train = sys.argv[i+1]
elif op == "-test": test = sys.argv[i+1]
elif op == "-l": lesions = [sys.argv[i+1]]
elif op == "-low": techniquesLow = [sys.argv[i+1]]
elif op == "-mid": techniquesMid = [sys.argv[i+1]]
elif op == "-i": image = sys.argv[i+1]
################################################
################################################
# create directories
################################################
directory = "mid-level/"
for techniqueMid in techniquesMid:
for techniqueLow in techniquesLow:
for type in [train,test]:
for lesion in lesions:
if not os.path.exists(directory + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion):
os.makedirs(directory + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion)
################################################
################################################
# HARD-SUM
################################################
def hardSum(PoIs, Words, ArqOut, numeroPalavras, label):
ArqOut = open(ArqOut,"wb")
ArqOut.write(label + " ")
histograma = [0 for i in range(numeroPalavras)]
distMatrix = sp.cdist(PoIs, Words, 'euclidean') # first points, after codewords. Return a len(PoIs) x len(Words) matrix of distances
for i in range(len(PoIs)):
minimum = min(distMatrix[i])
ind = numpy.where(distMatrix[i]==minimum)[0][0]
histograma[ind] += 1
histograma = common_functions.l1norm(histograma)
for h in histograma:
ArqOut.write(str(h) + " ")
ArqOut.close()
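# Hedged stand-alone sketch of the hard-assignment step used above (toy data;
# the common_functions.l1norm helper and the file output are omitted here):
def _demo_hard_assignment():
    pois = numpy.array([[0.0, 0.0], [1.0, 1.0], [0.9, 1.1]])  # 3 points
    words = numpy.array([[0.0, 0.0], [1.0, 1.0]])             # 2 codewords
    dist = sp.cdist(pois, words, 'euclidean')
    hist = numpy.bincount(dist.argmin(axis=1), minlength=len(words))
    return hist / float(hist.sum())  # L1-normalised: [1/3, 2/3]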
################################################
################################################
# SEMI-SOFT
################################################
def semiSoft(PoIs, Words, ArqOut, numeroPalavras, label):
ArqOut = open(ArqOut,"wb")
ArqOut.write(label + " ")
distances = numpy.zeros(numeroPalavras)
distances = sp.cdist(Words, PoIs, 'euclidean') # first codewords, after PoIs. Return a len(Words) x len(PoIs) matrix of distances
distances = [ 1/min(d) for d in distances ]
distances = common_functions.l1norm(distances)
for d in distances:
ArqOut.write(str(d) + " ")
ArqOut.close()
################################################
################################################
# SOFT-MAX
################################################
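# The soft-max coding below weights each point-to-codeword distance with the
# Gaussian kernel implemented by gaussiankernel(), i.e. (as written in the
# code) k(x) = exp(-x**2 / (2 * sigma**2)) / sqrt(2 * pi * sigma).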
def gaussiankernel(sigma, x):
return (1.0/(math.sqrt(sigma*2*math.pi)))*math.exp(-(x)**2/(2.0*sigma**2))
def softMax(PoIs, Words, ArqOut, numeroPalavras, label):
ArqOut = open(ArqOut,"wb")
ArqOut.write(label + " ")
    # distances - n * V matrix (number of points * number of codewords), so the distances are computed only once
distances = sp.cdist(PoIs, Words, 'euclidean') # first points, after codewords. Return a len(PoIs) x len(Words) matrix of distances
distances = [ gaussiankernel(45.0, dist) for dist in numpy.reshape(distances, (1,distances.size))[0] ] # apply the gaussian kernel
distances = numpy.reshape(distances, (len(PoIs), len(Words))) # put again in the format len(PoIs) x len(Words)
    # distToAll - vector storing, for each point, the sum of its distances to all codewords
distToAll = []
for point in distances:
distToAll.append(sum(point))
distToAll = numpy.asarray(distToAll)
features = []
distances = numpy.transpose(distances) # transpose. Format len(Words) x len(PoIs)
division = numpy.divide(distances, distToAll) # Equivalent to divide the distance of codeword i to PoI j by the summation of the distances of PoI j to all codewords
features = [ max(dist) for dist in division ] # get the maximum activation for each codeword
features = common_functions.l1norm(features)
for f in features:
ArqOut.write(str(f) + " ")
ArqOut.close()
################################################
################################################
# MAIN
################################################
en = dict(zip(["exsudato-duro","hemorragia-superficial","hemorragia-profunda","lesoes-vermelhas","mancha-algodonosa","drusas-maculares","imagem-normal"], ["Hard Exudates","Superficial Hemorrhages","Deep Hemorrhages","Red Lesions","Cotton-wool Spots","Drusen","Normal Images"]))
print "################################################"
print "# Mid-level feature extraction"
print "################################################"
for techniqueMid in techniquesMid:
for techniqueLow in techniquesLow:
if techniqueLow == "sparse": size = 500
else: size = 1500
for type in [train,test]:
for lesion in lesions:
print "Extracting features for " + en[lesion] + "\nLow-level: " + techniqueLow + "\nMid-level: " + techniqueMid
start = timeit.default_timer()
sys.stdout.write(". ")
sys.stdout.flush()
# get the codebook
CodebookTemp = open("codebooks/" + techniqueLow + "/complete-codebook-" + lesion + ".cb", "rb").readlines()
Codebook = []
for cb in CodebookTemp[1:]:
Codebook.append([ float(c) for c in cb.split() ])
Codebook = numpy.asarray(Codebook)
# define the directory of the input file (points of interest)
if image == "":
PoIsDir = "low-level/" + techniqueLow + "/" + type + "/"
else:
PoIsDir = "low-level/" + techniqueLow + "/DR2/"
# define the directory of the output file (histogram)
if image == "":
OutDir = "mid-level/" + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion + "/"
else: # Interest in describing only one image
if not os.path.exists("mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion):
os.makedirs("mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion)
OutDir = "mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion + "/"
for label in ["+1","-1"]:
# describe the normal images
if label == "-1": lesion = "imagem-normal"
lesion_en = en[lesion]
if image == "":
if type == "DR2" and (lesion == "hemorragia-superficial" or lesion == "hemorragia-profunda"):
listImages = os.listdir("datasets/" + type + "-images-by-lesions/Red Lesions")
else:
listImages = os.listdir("datasets/" + type + "-images-by-lesions/" + lesion_en)
else: listImages = [image]
for im in listImages:
im_special = common_functions.specialName(im)
if os.path.exists(OutDir + im[:-3] + "hist"): continue
# define the output file (histogram)
OutFile = OutDir + im[:-3] + "hist"
f = open(OutFile,"wb")
# get the points of interest
PoIsTemp = open(PoIsDir + im[:-3] + "key","rb").readlines()
PoIs = []
for i in range(2,len(PoIsTemp),2):
PoIs.append([ float(p) for p in PoIsTemp[i].split() ])
PoIs = numpy.asarray(PoIs)
sys.stdout.write(". ")
sys.stdout.flush()
if techniqueMid == "hard":
hardSum(PoIs, Codebook, OutFile, size, label)
elif techniqueMid == "soft":
softMax(PoIs, Codebook, OutFile, size, label)
else: #if techniqueMid == "semi":
semiSoft(PoIs, Codebook, OutFile, size, label)
stop = timeit.default_timer()
sys.stdout.write(" Done in " + common_functions.convertTime(stop - start) + "\n")
################################################
|
piresramon/retina.bovw.plosone
|
source/mid_level_script.py
|
Python
|
gpl-3.0
| 9,219
|
[
"Gaussian"
] |
cba8735572ccb21c42094c77e50395069bda2dcb5ea37bab361c608f886326d0
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Request Q"
"""
import atexit
from concurrent.futures import _base
import Queue as queue
import multiprocessing
import threading
import weakref
import sys
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
nb_shutdown_processes = [0]
def shutdown_one_process():
"""Tell a worker to terminate, which will in turn wake us again"""
call_queue.put(None)
nb_shutdown_processes[0] += 1
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
result_item = result_queue.get(block=True)
if result_item is not None:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
while nb_shutdown_processes[0] < len(processes):
shutdown_one_process()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
call_queue.close()
return
del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
import os
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
        # indeterminate limit; assume the limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
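# Hedged usage sketch: a standard submit/result round-trip. The worker
# function below is a made-up example, not part of this module.
def _square(x):
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as executor:
        futures = [executor.submit(_square, i) for i in range(4)]
        print([f.result() for f in futures])  # expected: [0, 1, 4, 9]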
|
cstipkovic/spidermonkey-research
|
python/futures/concurrent/futures/process.py
|
Python
|
mpl-2.0
| 14,847
|
[
"Brian"
] |
4811c31de39069b5f437a7bbc10ddb59e87f3fa6607d71b365f4988409adef1d
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image preprocessing layers."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine.base_preprocessing_layer import PreprocessingLayer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import control_flow_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util.tf_export import keras_export
ResizeMethod = image_ops.ResizeMethod
_RESIZE_METHODS = {
'bilinear': ResizeMethod.BILINEAR,
'nearest': ResizeMethod.NEAREST_NEIGHBOR,
'bicubic': ResizeMethod.BICUBIC,
'area': ResizeMethod.AREA,
'lanczos3': ResizeMethod.LANCZOS3,
'lanczos5': ResizeMethod.LANCZOS5,
'gaussian': ResizeMethod.GAUSSIAN,
'mitchellcubic': ResizeMethod.MITCHELLCUBIC
}
H_AXIS = 1
W_AXIS = 2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {'reflect', 'wrap', 'constant', 'nearest'}:
raise NotImplementedError(
'Unknown `fill_mode` {}. Only `reflect`, `wrap`, '
'`constant` and `nearest` are supported.'.format(fill_mode))
if interpolation not in {'nearest', 'bilinear'}:
raise NotImplementedError('Unknown `interpolation` {}. Only `nearest` and '
'`bilinear` are supported.'.format(interpolation))
@keras_export('keras.layers.experimental.preprocessing.Resizing')
class Resizing(PreprocessingLayer):
"""Image resizing layer.
Resize the batched image input to target height and width. The input should
be a 4-D tensor in the format of NHWC.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
"""
def __init__(self,
height,
width,
interpolation='bilinear',
**kwargs):
self.target_height = height
self.target_width = width
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
super(Resizing, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Resizing').set(True)
def call(self, inputs):
outputs = image_ops.resize_images_v2(
images=inputs,
size=[self.target_height, self.target_width],
method=self._interpolation_method)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.target_height, self.target_width, input_shape[3]])
def get_config(self):
config = {
'height': self.target_height,
'width': self.target_width,
'interpolation': self.interpolation,
}
base_config = super(Resizing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.CenterCrop')
class CenterCrop(PreprocessingLayer):
"""Crop the central portion of the images to target height and width.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, target_height, target_width, channels)`.
If the input height/width is even and the target height/width is odd (or
inversely), the input image is left-padded by 1 pixel.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
"""
def __init__(self, height, width, **kwargs):
self.target_height = height
self.target_width = width
self.input_spec = InputSpec(ndim=4)
super(CenterCrop, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('CenterCrop').set(True)
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
img_hd = inputs_shape[H_AXIS]
img_wd = inputs_shape[W_AXIS]
img_hd_diff = img_hd - self.target_height
img_wd_diff = img_wd - self.target_width
checks = []
checks.append(
check_ops.assert_non_negative(
img_hd_diff,
message='The crop height {} should not be greater than input '
'height.'.format(self.target_height)))
checks.append(
check_ops.assert_non_negative(
img_wd_diff,
message='The crop width {} should not be greater than input '
'width.'.format(self.target_width)))
with ops.control_dependencies(checks):
bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)
bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)
bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack(
[-1, self.target_height, self.target_width, -1])
outputs = array_ops.slice(inputs, bbox_begin, bbox_size)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.target_height, self.target_width, input_shape[3]])
def get_config(self):
config = {
'height': self.target_height,
'width': self.target_width,
}
base_config = super(CenterCrop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.RandomCrop')
class RandomCrop(PreprocessingLayer):
"""Randomly crop the images to target height and width.
This layer will crop all the images in the same batch to the same cropping
location.
By default, random cropping is only applied during training. At inference
time, the images will be first rescaled to preserve the shorter side, and
center cropped. If you need to apply random cropping at inference time,
set `training` to True when calling the layer.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
"""
def __init__(self, height, width, seed=None, **kwargs):
self.height = height
self.width = width
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomCrop, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomCrop').set(True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_cropped_inputs():
"""Cropped inputs with stateless random ops."""
input_shape = array_ops.shape(inputs)
crop_size = array_ops.stack(
[input_shape[0], self.height, self.width, input_shape[3]])
check = control_flow_ops.Assert(
math_ops.reduce_all(input_shape >= crop_size),
[self.height, self.width])
with ops.control_dependencies([check]):
limit = input_shape - crop_size + 1
offset = stateless_random_ops.stateless_random_uniform(
array_ops.shape(input_shape),
dtype=crop_size.dtype,
maxval=crop_size.dtype.max,
seed=self._rng.make_seeds()[:, 0]) % limit
return array_ops.slice(inputs, offset, crop_size)
# TODO(b/143885775): Share logic with Resize and CenterCrop.
def resize_and_center_cropped_inputs():
"""Deterministically resize to shorter side and center crop."""
input_shape = array_ops.shape(inputs)
input_height_t = input_shape[H_AXIS]
input_width_t = input_shape[W_AXIS]
ratio_cond = (input_height_t / input_width_t > (self.height / self.width))
# pylint: disable=g-long-lambda
resized_height = control_flow_util.smart_cond(
ratio_cond,
lambda: math_ops.cast(self.width * input_height_t / input_width_t,
input_height_t.dtype), lambda: self.height)
resized_width = control_flow_util.smart_cond(
ratio_cond, lambda: self.width,
lambda: math_ops.cast(self.height * input_width_t / input_height_t,
input_width_t.dtype))
# pylint: enable=g-long-lambda
resized_inputs = image_ops.resize_images_v2(
images=inputs, size=array_ops.stack([resized_height, resized_width]))
img_hd_diff = resized_height - self.height
img_wd_diff = resized_width - self.width
bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)
bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)
bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack([-1, self.height, self.width, -1])
outputs = array_ops.slice(resized_inputs, bbox_begin, bbox_size)
return outputs
output = control_flow_util.smart_cond(training, random_cropped_inputs,
resize_and_center_cropped_inputs)
original_shape = inputs.shape.as_list()
batch_size, num_channels = original_shape[0], original_shape[3]
output_shape = [batch_size] + [self.height, self.width] + [num_channels]
output.set_shape(output_shape)
return output
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.height, self.width, input_shape[3]])
def get_config(self):
config = {
'height': self.height,
'width': self.width,
'seed': self.seed,
}
base_config = super(RandomCrop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
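# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes a TF 2.x install where this layer is exported as
# `tf.keras.layers.experimental.preprocessing.RandomCrop` (per the
# keras_export decorator above); shapes and the seed are made-up values.
def _example_random_crop_usage():
  import tensorflow as tf
  images = tf.random.uniform((8, 256, 256, 3))
  layer = tf.keras.layers.experimental.preprocessing.RandomCrop(
      height=224, width=224, seed=0)
  # training=True -> random 224x224 crops; training=False -> resize the
  # shorter side and center crop.
  cropped = layer(images, training=True)
  centered = layer(images, training=False)
  assert cropped.shape == (8, 224, 224, 3)
  assert centered.shape == (8, 224, 224, 3)
  return cropped, centered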
@keras_export('keras.layers.experimental.preprocessing.Rescaling')
class Rescaling(PreprocessingLayer):
"""Multiply inputs by `scale` and adds `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference.
Input shape:
Arbitrary.
Output shape:
Same as input.
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
"""
def __init__(self, scale, offset=0., **kwargs):
self.scale = scale
self.offset = offset
super(Rescaling, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Rescaling').set(True)
def call(self, inputs):
dtype = self._compute_dtype
scale = math_ops.cast(self.scale, dtype)
offset = math_ops.cast(self.offset, dtype)
return math_ops.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'scale': self.scale,
'offset': self.offset,
}
base_config = super(Rescaling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
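# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.Rescaling`; the [0, 255] ->
# [-1, 1] mapping mirrors the docstring above.
def _example_rescaling_usage():
  import numpy as np
  import tensorflow as tf
  images = tf.constant([[0., 127.5, 255.]])
  layer = tf.keras.layers.experimental.preprocessing.Rescaling(
      scale=1. / 127.5, offset=-1.)
  out = layer(images)
  # 0 -> -1, 127.5 -> 0, 255 -> 1 (up to float32 rounding).
  np.testing.assert_allclose(out.numpy(), [[-1., 0., 1.]], atol=1e-6)
  return out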
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'
HORIZONTAL_AND_VERTICAL = 'horizontal_and_vertical'
@keras_export('keras.layers.experimental.preprocessing.RandomFlip')
class RandomFlip(PreprocessingLayer):
"""Randomly flip each image horizontally and vertically.
This layer will flip the images based on the `mode` attribute.
During inference time, the output will be identical to input. Call the layer
with `training=True` to flip the input.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
mode: String indicating which flip mode to use. Can be "horizontal",
"vertical", or "horizontal_and_vertical". Defaults to
"horizontal_and_vertical". "horizontal" is a left-right flip and
"vertical" is a top-bottom flip.
seed: Integer. Used to create a random seed.
"""
def __init__(self,
mode=HORIZONTAL_AND_VERTICAL,
seed=None,
**kwargs):
super(RandomFlip, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomFlip').set(True)
self.mode = mode
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError('RandomFlip layer {name} received an unknown mode '
'argument {arg}'.format(name=self.name, arg=mode))
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_flipped_inputs():
flipped_outputs = inputs
if self.horizontal:
flipped_outputs = image_ops.random_flip_left_right(
flipped_outputs, self.seed)
if self.vertical:
flipped_outputs = image_ops.random_flip_up_down(flipped_outputs,
self.seed)
return flipped_outputs
output = control_flow_util.smart_cond(training, random_flipped_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'mode': self.mode,
'seed': self.seed,
}
base_config = super(RandomFlip, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
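# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.RandomFlip`.
def _example_random_flip_usage():
  import numpy as np
  import tensorflow as tf
  images = tf.random.uniform((4, 32, 32, 3), seed=1)
  layer = tf.keras.layers.experimental.preprocessing.RandomFlip(
      mode='horizontal', seed=1)
  flipped = layer(images, training=True)     # may flip left-right per image
  unchanged = layer(images, training=False)  # inference: identity
  np.testing.assert_allclose(unchanged.numpy(), images.numpy())
  return flipped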
# TODO(tanzheny): Add examples, here and everywhere.
@keras_export('keras.layers.experimental.preprocessing.RandomTranslation')
class RandomTranslation(PreprocessingLayer):
"""Randomly translate each image during training.
Args:
height_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for shifting vertically. A negative
value means shifting image up, while a positive value means shifting image
down. When represented as a single positive float, this value is used for
both the upper and lower bound. For instance, `height_factor=(-0.2, 0.3)`
results in an output shifted by a random amount in the range [-20%, +30%].
      `height_factor=0.2` results in an output shifted vertically by a random
      amount in the range [-20%, +20%].
width_factor: a float represented as fraction of value, or a tuple of size 2
representing lower and upper bound for shifting horizontally. A negative
value means shifting image left, while a positive value means shifting
image right. When represented as a single positive float, this value is
used for both the upper and lower bound. For instance,
      `width_factor=(-0.2, 0.3)` results in an output shifted left by up to
      20%, or shifted right by up to 30%. `width_factor=0.2` results in an
      output shifted left or right by a random amount in the range
      [-20%, +20%].
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the boundaries
when `fill_mode` is "constant".
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
  Raises:
    ValueError: if either bound is not between [-1, 1], or upper bound is less
      than lower bound.
"""
def __init__(self,
height_factor,
width_factor,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
fill_value=0.0,
**kwargs):
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError('`height_factor` cannot have upper bound less than '
'lower bound, got {}'.format(height_factor))
if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
raise ValueError('`height_factor` must have values between [-1, 1], '
'got {}'.format(height_factor))
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError('`width_factor` cannot have upper bound less than '
'lower bound, got {}'.format(width_factor))
if abs(self.width_lower) > 1. or abs(self.width_upper) > 1.:
raise ValueError('`width_factor` must have values between [-1, 1], '
'got {}'.format(width_factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomTranslation, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomTranslation').set(
True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_translated_inputs():
"""Translated inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
h_axis, w_axis = H_AXIS, W_AXIS
img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)
height_translate = self._rng.uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=dtypes.float32)
height_translate = height_translate * img_hd
width_translate = self._rng.uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=dtypes.float32)
width_translate = width_translate * img_wd
translations = math_ops.cast(
array_ops.concat([width_translate, height_translate], axis=1),
dtype=dtypes.float32)
return transform(
inputs,
get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value)
output = control_flow_util.smart_cond(training, random_translated_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'height_factor': self.height_factor,
'width_factor': self.width_factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomTranslation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
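# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.RandomTranslation`; the factors
# below are example values.
def _example_random_translation_usage():
  import tensorflow as tf
  images = tf.random.uniform((2, 64, 64, 3))
  layer = tf.keras.layers.experimental.preprocessing.RandomTranslation(
      height_factor=0.1, width_factor=(-0.2, 0.2), fill_mode='constant')
  shifted = layer(images, training=True)
  assert shifted.shape == (2, 64, 64, 3)
  return shifted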
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing [dx, dy] to translate
for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape (num_images, 8) projective transforms which can be given
to `transform`.
"""
with K.name_scope(name or 'translation_matrix'):
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
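# --- Worked example for the helper above (editor addition, not part of the
# original file): for a single translation [dx, dy] = [2., 3.] the returned
# row is the flattened inverse transform [1, 0, -2, 0, 1, -3, 0, 0].
def _example_translation_matrix():
  import tensorflow as tf
  translations = tf.constant([[2., 3.]], dtype=tf.float32)
  matrix = get_translation_matrix(translations)  # shape (1, 8)
  return matrix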
def transform(images,
transforms,
fill_mode='reflect',
fill_value=0.0,
interpolation='bilinear',
output_shape=None,
name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows,
num_columns) (HW). The rank must be statically known (the shape is not
      `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2,
c0, c1], then it maps the *output* point `(x, y)` to a transformed *input*
point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
`k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
transform mapping input points to output points. Note that gradients are
not backpropagated into transformation parameters.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
fill_value: a float represents the value to be filled outside the boundaries
when `fill_mode` is "constant".
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
    output_shape: Output dimension after the transform, [height, width]. If None,
output is the same size as input image.
    name: The name of the op.

  Fill mode behavior for each valid value is as follows:
  - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
    reflecting about the edge of the last pixel.
  - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
    filling all values beyond the edge with the same constant value k = 0.
  - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping
    around to the opposite edge.
  - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
    nearest pixel.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
"""
with K.name_scope(name or 'transform'):
if output_shape is None:
output_shape = array_ops.shape(images)[1:3]
if not context.executing_eagerly():
output_shape_value = tensor_util.constant_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = ops.convert_to_tensor_v2_with_dispatch(
output_shape, dtypes.int32, name='output_shape')
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError('output_shape must be a 1-D Tensor of 2 elements: '
'new_height, new_width, instead got '
'{}'.format(output_shape))
fill_value = ops.convert_to_tensor_v2_with_dispatch(
fill_value, dtypes.float32, name='fill_value')
if compat.forward_compatible(2020, 8, 5):
return gen_image_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper())
return gen_image_ops.ImageProjectiveTransformV2(
images=images,
output_shape=output_shape,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper())
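# --- Illustrative check for `transform` (editor addition, not part of the
# original file): an identity projective transform (a0 = 1, b1 = 1, all other
# coefficients 0) should reproduce the input exactly.
def _example_identity_transform():
  import numpy as np
  import tensorflow as tf
  images = tf.random.uniform((2, 5, 5, 3))
  identity = tf.constant([[1., 0., 0., 0., 1., 0., 0., 0.]] * 2)
  out = transform(images, identity, interpolation='nearest')
  np.testing.assert_allclose(out.numpy(), images.numpy())
  return out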
def get_rotation_matrix(angles, image_height, image_width, name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images) a
vector with an angle to rotate each image in the batch. The rank must be
statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to operation `image_projective_transform_v2`. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with K.name_scope(name or 'rotation_matrix'):
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
@keras_export('keras.layers.experimental.preprocessing.RandomRotation')
class RandomRotation(PreprocessingLayer):
"""Randomly rotate each image.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
factor: a float represented as fraction of 2pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
      counter-clockwise. A positive value means rotating counter-clockwise, while
      a negative value means rotating clockwise. When represented as a single
float, this value is used for both the upper and lower bound. For
instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an
output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the boundaries
when `fill_mode` is "constant".
  Raises:
ValueError: if either bound is not between [0, 1], or upper bound is less
than lower bound.
"""
def __init__(self,
factor,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
fill_value=0.0,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
      raise ValueError('`factor` cannot have upper bound less than '
                       'lower bound, got {}'.format(factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomRotation, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomRotation').set(
True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_rotated_inputs():
"""Rotated inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
min_angle = self.lower * 2. * np.pi
max_angle = self.upper * 2. * np.pi
angles = self._rng.uniform(
shape=[batch_size], minval=min_angle, maxval=max_angle)
return transform(
inputs,
get_rotation_matrix(angles, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation)
output = control_flow_util.smart_cond(training, random_rotated_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'factor': self.factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomRotation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
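# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.RandomRotation`; factor=0.25
# means up to a quarter turn (0.25 * 2*pi) in either direction.
def _example_random_rotation_usage():
  import tensorflow as tf
  images = tf.random.uniform((2, 48, 48, 1))
  layer = tf.keras.layers.experimental.preprocessing.RandomRotation(
      factor=0.25, fill_mode='constant', fill_value=0.)
  rotated = layer(images, training=True)
  assert rotated.shape == (2, 48, 48, 1)
  return rotated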
@keras_export('keras.layers.experimental.preprocessing.RandomZoom')
class RandomZoom(PreprocessingLayer):
"""Randomly zoom each image during training.
Args:
height_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for zooming vertically. When
represented as a single float, this value is used for both the upper and
lower bound. A positive value means zooming out, while a negative value
      means zooming in. For instance, `height_factor=(0.2, 0.3)` results in an
      output zoomed out by a random amount in the range [+20%, +30%].
      `height_factor=(-0.3, -0.2)` results in an output zoomed in by a random
      amount in the range [20%, 30%].
width_factor: a float represented as fraction of value, or a tuple of size 2
representing lower and upper bound for zooming horizontally. When
represented as a single float, this value is used for both the upper and
      lower bound. For instance, `width_factor=(0.2, 0.3)` results in an output
      zoomed out horizontally by between 20% and 30%. `width_factor=(-0.3, -0.2)`
      results in an output zoomed in horizontally by between 20% and 30%.
      Defaults to `None`, i.e., zoom vertically and horizontally by the same
      amount, preserving the aspect ratio.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the
nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the boundaries
when `fill_mode` is "constant".
  Example:

  >>> input_img = np.random.random((32, 224, 224, 3))
  >>> layer = tf.keras.layers.experimental.preprocessing.RandomZoom(.5, .2)
  >>> out_img = layer(input_img)
  >>> out_img.shape
  TensorShape([32, 224, 224, 3])
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
  Raises:
ValueError: if lower bound is not between [0, 1], or upper bound is
negative.
"""
def __init__(self,
height_factor,
width_factor=None,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
fill_value=0.0,
**kwargs):
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
raise ValueError('`height_factor` must have values between [-1, 1], '
'got {}'.format(height_factor))
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor # pylint: disable=invalid-unary-operand-type
self.width_upper = width_factor
if self.width_lower < -1. or self.width_upper < -1.:
raise ValueError('`width_factor` must have values larger than -1, '
'got {}'.format(width_factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomZoom, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomZoom').set(True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_zoomed_inputs():
"""Zoomed inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
height_zoom = self._rng.uniform(
shape=[batch_size, 1],
minval=1. + self.height_lower,
maxval=1. + self.height_upper)
if self.width_factor is not None:
width_zoom = self._rng.uniform(
shape=[batch_size, 1],
minval=1. + self.width_lower,
maxval=1. + self.width_upper)
else:
width_zoom = height_zoom
zooms = math_ops.cast(
array_ops.concat([width_zoom, height_zoom], axis=1),
dtype=dtypes.float32)
return transform(
inputs,
get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation)
output = control_flow_util.smart_cond(training, random_zoomed_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'height_factor': self.height_factor,
'width_factor': self.width_factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomZoom, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing [zx, zy] to zoom for each
image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to operation `image_projective_transform_v2`. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with K.name_scope(name or 'zoom_matrix'):
num_zooms = array_ops.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.) / 2.0) * (1.0 - zooms[:, 1, None])
return array_ops.concat(
values=[
zooms[:, 0, None],
array_ops.zeros((num_zooms, 1), dtypes.float32),
x_offset,
array_ops.zeros((num_zooms, 1), dtypes.float32),
zooms[:, 1, None],
y_offset,
array_ops.zeros((num_zooms, 2), dtypes.float32),
],
axis=1)
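# --- Worked example for the helper above (editor addition, not part of the
# original file): for a single zoom [zx, zy] = [2., 2.] on a 5x5 image, the
# returned row is [2, 0, -2, 0, 2, -2, 0, 0], i.e. scale by 2 about the image
# center, with offsets ((5 - 1) / 2) * (1 - 2) = -2 on each axis.
def _example_zoom_matrix():
  import tensorflow as tf
  zooms = tf.constant([[2., 2.]], dtype=tf.float32)
  return get_zoom_matrix(zooms, image_height=5., image_width=5.)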
@keras_export('keras.layers.experimental.preprocessing.RandomContrast')
class RandomContrast(PreprocessingLayer):
"""Adjust the contrast of an image or images by a random factor.
Contrast is adjusted independently for each channel of each image during
training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound. When represented as a single
float, lower = upper. The contrast factor will be randomly picked between
[1.0 - lower, 1.0 + upper].
seed: Integer. Used to create a random seed.
  Raises:
ValueError: if lower bound is not between [0, 1], or upper bound is
negative.
"""
def __init__(self, factor, seed=None, **kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = self.upper = factor
if self.lower < 0. or self.upper < 0. or self.lower > 1.:
      raise ValueError('Factor cannot have negative values or a lower bound '
                       'greater than 1.0, got {}'.format(factor))
self.seed = seed
self.input_spec = InputSpec(ndim=4)
super(RandomContrast, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomContrast').set(
True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_contrasted_inputs():
return image_ops.random_contrast(inputs, 1. - self.lower, 1. + self.upper,
self.seed)
output = control_flow_util.smart_cond(training, random_contrasted_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'factor': self.factor,
'seed': self.seed,
}
base_config = super(RandomContrast, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
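# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.RandomContrast`; factor=0.2
# draws a contrast factor from [0.8, 1.2].
def _example_random_contrast_usage():
  import tensorflow as tf
  images = tf.random.uniform((4, 16, 16, 3))
  layer = tf.keras.layers.experimental.preprocessing.RandomContrast(
      factor=0.2, seed=3)
  adjusted = layer(images, training=True)
  assert adjusted.shape == (4, 16, 16, 3)
  return adjusted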
@keras_export('keras.layers.experimental.preprocessing.RandomHeight')
class RandomHeight(PreprocessingLayer):
"""Randomly vary the height of a batch of images during training.
Adjusts the height of a batch of images by a random factor. The input
should be a 4-D tensor in the "channels_last" image data format.
By default, this layer is inactive during inference.
Args:
factor: A positive float (fraction of original height), or a tuple of size 2
representing lower and upper bound for resizing vertically. When
represented as a single float, this value is used for both the upper and
lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
height changed by a random amount in the range `[20%, 30%]`.
      `factor=(-0.2, 0.3)` results in an output with height changed by a random
      amount in the range `[-20%, +30%]`. `factor=0.2` results in an output
      with height changed by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
seed: Integer. Used to create a random seed.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`
(data_format='channels_last').
Output shape:
4D tensor with shape: `(samples, random_height, width, channels)`.
"""
def __init__(self,
factor,
interpolation='bilinear',
seed=None,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.height_lower = factor[0]
self.height_upper = factor[1]
else:
self.height_lower = -factor
self.height_upper = factor
if self.height_upper < self.height_lower:
raise ValueError('`factor` cannot have upper bound less than '
'lower bound, got {}'.format(factor))
if self.height_lower < -1. or self.height_upper < -1.:
raise ValueError('`factor` must have values larger than -1, '
'got {}'.format(factor))
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
self.seed = seed
self._rng = make_generator(self.seed)
super(RandomHeight, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomHeight').set(True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_height_inputs():
"""Inputs height-adjusted with random ops."""
inputs_shape = array_ops.shape(inputs)
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = inputs_shape[W_AXIS]
height_factor = self._rng.uniform(
shape=[],
minval=(1.0 + self.height_lower),
maxval=(1.0 + self.height_upper))
adjusted_height = math_ops.cast(height_factor * img_hd, dtypes.int32)
adjusted_size = array_ops.stack([adjusted_height, img_wd])
output = image_ops.resize_images_v2(
images=inputs, size=adjusted_size, method=self._interpolation_method)
original_shape = inputs.shape.as_list()
output_shape = [original_shape[0]] + [None] + original_shape[2:4]
output.set_shape(output_shape)
return output
return control_flow_util.smart_cond(training, random_height_inputs,
lambda: inputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], None, input_shape[2], input_shape[3]])
def get_config(self):
config = {
'factor': self.factor,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomHeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
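# --- Illustrative usage sketch (editor addition, not part of the original
# file). Assumes the public export
# `tf.keras.layers.experimental.preprocessing.RandomHeight`; the output
# height varies per call, so only the width stays fixed.
def _example_random_height_usage():
  import tensorflow as tf
  images = tf.random.uniform((2, 100, 60, 3))
  layer = tf.keras.layers.experimental.preprocessing.RandomHeight(
      factor=(0.1, 0.2), seed=7)
  stretched = layer(images, training=True)
  # Height is scaled by a factor drawn from [1.1, 1.2); width is untouched.
  assert 110 <= stretched.shape[1] <= 120
  assert stretched.shape[2] == 60
  return stretched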
@keras_export('keras.layers.experimental.preprocessing.RandomWidth')
class RandomWidth(PreprocessingLayer):
"""Randomly vary the width of a batch of images during training.
Adjusts the width of a batch of images by a random factor. The input
should be a 4-D tensor in the "channels_last" image data format.
By default, this layer is inactive during inference.
Args:
    factor: A positive float (fraction of original width), or a tuple of size 2
      representing lower and upper bound for resizing horizontally. When
      represented as a single float, this value is used for both the upper and
      lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
      width changed by a random amount in the range `[20%, 30%]`.
      `factor=(-0.2, 0.3)` results in an output with width changed by a random
      amount in the range `[-20%, +30%]`. `factor=0.2` results in an output
      with width changed by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
seed: Integer. Used to create a random seed.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`
(data_format='channels_last').
Output shape:
4D tensor with shape: `(samples, height, random_width, channels)`.
"""
def __init__(self,
factor,
interpolation='bilinear',
seed=None,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.width_lower = factor[0]
self.width_upper = factor[1]
else:
self.width_lower = -factor
self.width_upper = factor
if self.width_upper < self.width_lower:
raise ValueError('`factor` cannot have upper bound less than '
'lower bound, got {}'.format(factor))
if self.width_lower < -1. or self.width_upper < -1.:
raise ValueError('`factor` must have values larger than -1, '
'got {}'.format(factor))
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
self.seed = seed
self._rng = make_generator(self.seed)
super(RandomWidth, self).__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('RandomWidth').set(True)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_width_inputs():
"""Inputs width-adjusted with random ops."""
inputs_shape = array_ops.shape(inputs)
img_hd = inputs_shape[H_AXIS]
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
width_factor = self._rng.uniform(
shape=[],
minval=(1.0 + self.width_lower),
maxval=(1.0 + self.width_upper))
adjusted_width = math_ops.cast(width_factor * img_wd, dtypes.int32)
adjusted_size = array_ops.stack([img_hd, adjusted_width])
output = image_ops.resize_images_v2(
images=inputs, size=adjusted_size, method=self._interpolation_method)
original_shape = inputs.shape.as_list()
output_shape = original_shape[0:2] + [None] + [original_shape[3]]
output.set_shape(output_shape)
return output
return control_flow_util.smart_cond(training, random_width_inputs,
lambda: inputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], None, input_shape[3]])
def get_config(self):
config = {
'factor': self.factor,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomWidth, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def make_generator(seed=None):
"""Creates a random generator.
Args:
seed: the seed to initialize the generator. If None, the generator will be
initialized non-deterministically.
Returns:
A generator object.
"""
if seed:
return stateful_random_ops.Generator.from_seed(seed)
else:
return stateful_random_ops.Generator.from_non_deterministic_state()
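# --- Illustrative note for `make_generator` (editor addition, not part of the
# original file): a seeded generator yields a reproducible stream, an unseeded
# one does not.
def _example_make_generator():
  import numpy as np
  a = make_generator(seed=42).uniform(shape=[3])
  b = make_generator(seed=42).uniform(shape=[3])
  np.testing.assert_allclose(a.numpy(), b.numpy())
  return a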
def get_interpolation(interpolation):
interpolation = interpolation.lower()
if interpolation not in _RESIZE_METHODS:
raise NotImplementedError(
'Value not recognized for `interpolation`: {}. Supported values '
'are: {}'.format(interpolation, _RESIZE_METHODS.keys()))
return _RESIZE_METHODS[interpolation]
| petewarden/tensorflow | tensorflow/python/keras/layers/preprocessing/image_preprocessing.py | Python | apache-2.0 | 52,762 | ["Gaussian"] | a1e4a7ef06cac9c27f1fa6a161622774edf99a77385b3427dec351e343f61e56 |
# Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates toy optimization problems.
This module contains a base class, Problem, that defines a minimal interface
for optimization problems, and a few specific problem types that subclass it.
Test functions for optimization: http://www.sfu.ca/~ssurjano/optimization.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from learned_optimizer.problems import problem_spec as prob_spec
tf.app.flags.DEFINE_float("l2_reg_scale", 1e-3,
"""Scaling factor for parameter value regularization
in softmax classifier problems.""")
FLAGS = tf.app.flags.FLAGS
EPSILON = 1e-6
MAX_SEED = 4294967295
PARAMETER_SCOPE = "parameters"
_Spec = prob_spec.Spec
class Problem(object):
"""Base class for optimization problems.
This defines an interface for optimization problems, including objective and
gradients functions and a feed_generator function that yields data to pass to
feed_dict in tensorflow.
Subclasses of Problem must (at the minimum) override the objective method,
which computes the objective/loss/cost to minimize, and specify the desired
shape of the parameters in a list in the param_shapes attribute.
"""
def __init__(self, param_shapes, random_seed, noise_stdev, init_fn=None):
"""Initializes a global random seed for the problem.
Args:
param_shapes: A list of tuples defining the expected shapes of the
parameters for this problem
      random_seed: Either an integer or None, in which case the seed is
        randomly drawn
noise_stdev: Strength (standard deviation) of added gradient noise
init_fn: A function taking a tf.Session object that is used to
initialize the problem's variables.
Raises:
ValueError: If the random_seed is not an integer and not None
"""
if random_seed is not None and not isinstance(random_seed, int):
raise ValueError("random_seed must be an integer or None")
# Pick a random seed.
self.random_seed = (np.random.randint(MAX_SEED) if random_seed is None
else random_seed)
# Store the noise level.
self.noise_stdev = noise_stdev
# Set the random seed to ensure any random data in the problem is the same.
np.random.seed(self.random_seed)
# Store the parameter shapes.
self.param_shapes = param_shapes
if init_fn is not None:
self.init_fn = init_fn
else:
self.init_fn = lambda _: None
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_normal(shape, seed=seed) for shape in self.param_shapes]
def init_variables(self, seed=None):
"""Returns a list of variables with the given shape."""
with tf.variable_scope(PARAMETER_SCOPE):
params = [tf.Variable(param) for param in self.init_tensors(seed)]
return params
def objective(self, parameters, data=None, labels=None):
"""Computes the objective given a list of parameters.
Args:
parameters: The parameters to optimize (as a list of tensors)
data: An optional batch of data for calculating objectives
labels: An optional batch of corresponding labels
Returns:
A scalar tensor representing the objective value
"""
raise NotImplementedError
def gradients(self, objective, parameters):
"""Compute gradients of the objective with respect to the parameters.
Args:
objective: The objective op (e.g. output of self.objective())
parameters: A list of tensors (the parameters to optimize)
Returns:
A list of tensors representing the gradient for each parameter,
returned in the same order as the given list
"""
grads = tf.gradients(objective, list(parameters))
noisy_grads = []
for grad in grads:
if isinstance(grad, tf.IndexedSlices):
noise = self.noise_stdev * tf.random_normal(tf.shape(grad.values))
new_grad = tf.IndexedSlices(grad.values + noise, grad.indices)
else:
new_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape())
noisy_grads.append(new_grad)
return noisy_grads
class Quadratic(Problem):
"""Optimizes a random quadratic function.
The objective is: f(x) = (1/2) ||Wx - y||_2^2
where W is a random Gaussian matrix and y is a random Gaussian vector.
"""
def __init__(self, ndim, random_seed=None, noise_stdev=0.0):
"""Initializes a random quadratic problem."""
param_shapes = [(ndim, 1)]
super(Quadratic, self).__init__(param_shapes, random_seed, noise_stdev)
# Generate a random problem instance.
self.w = np.random.randn(ndim, ndim).astype("float32")
self.y = np.random.randn(ndim, 1).astype("float32")
def objective(self, params, data=None, labels=None):
"""Quadratic objective (see base class for details)."""
return tf.nn.l2_loss(tf.matmul(self.w, params[0]) - self.y)
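# --- Illustrative usage sketch (editor addition, not part of the original
# module). Uses the TF1-style graph/session API assumed throughout this file;
# ndim and the seeds are example values.
def _example_quadratic_usage():
  problem = Quadratic(ndim=3, random_seed=0, noise_stdev=0.0)
  params = problem.init_variables(seed=0)
  loss = problem.objective(params)
  grads = problem.gradients(loss, params)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_value, grad_values = sess.run([loss, grads])
  return loss_value, grad_values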
class SoftmaxClassifier(Problem):
"""Helper functions for supervised softmax classification problems."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_normal(shape, seed=seed) * 1.2 / np.sqrt(shape[0])
for shape in self.param_shapes]
def inference(self, params, data):
"""Computes logits given parameters and data.
Args:
params: List of parameter tensors or variables
data: Batch of features with samples along the first dimension
Returns:
logits: Un-normalized logits with shape (num_samples, num_classes)
"""
raise NotImplementedError
def objective(self, params, data, labels):
"""Computes the softmax cross entropy.
Args:
params: List of parameter tensors or variables
data: Batch of features with samples along the first dimension
labels: Vector of labels with the same number of samples as the data
Returns:
loss: Softmax cross entropy loss averaged over the samples in the batch
Raises:
ValueError: If the objective is to be computed over >2 classes, because
this operation is broken in tensorflow at the moment.
"""
# Forward pass.
logits = self.inference(params, data)
# Compute the loss.
l2reg = [tf.reduce_sum(param ** 2) for param in params]
if int(logits.get_shape()[1]) == 2:
labels = tf.cast(labels, tf.float32)
losses = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits[:, 0])
else:
raise ValueError("Unable to compute softmax cross entropy for more than"
" 2 classes.")
return tf.reduce_mean(losses) + tf.reduce_mean(l2reg) * FLAGS.l2_reg_scale
def argmax(self, logits):
"""Samples the most likely class label given the logits.
Args:
logits: Un-normalized logits with shape (num_samples, num_classes)
Returns:
predictions: Predicted class labels, has shape (num_samples,)
"""
return tf.cast(tf.argmax(tf.nn.softmax(logits), 1), tf.int32)
def accuracy(self, params, data, labels):
"""Computes the accuracy (fraction of correct classifications).
Args:
params: List of parameter tensors or variables
data: Batch of features with samples along the first dimension
labels: Vector of labels with the same number of samples as the data
Returns:
accuracy: Fraction of correct classifications across the batch
"""
predictions = self.argmax(self.inference(params, data))
return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32))
class SoftmaxRegression(SoftmaxClassifier):
"""Builds a softmax regression problem."""
def __init__(self, n_features, n_classes, activation=tf.identity,
random_seed=None, noise_stdev=0.0):
self.activation = activation
self.n_features = n_features
param_shapes = [(n_features, n_classes), (n_classes,)]
super(SoftmaxRegression, self).__init__(param_shapes,
random_seed,
noise_stdev)
def inference(self, params, data):
features = tf.reshape(data, (-1, self.n_features))
return tf.matmul(features, params[0]) + params[1]
class SparseSoftmaxRegression(SoftmaxClassifier):
"""Builds a sparse input softmax regression problem."""
def __init__(self,
n_features,
n_classes,
activation=tf.identity,
random_seed=None,
noise_stdev=0.0):
self.activation = activation
self.n_features = n_features
param_shapes = [(n_classes, n_features), (n_features, n_classes), (
n_classes,)]
super(SparseSoftmaxRegression, self).__init__(param_shapes, random_seed,
noise_stdev)
def inference(self, params, data):
all_embeddings, softmax_weights, softmax_bias = params
embeddings = tf.nn.embedding_lookup(all_embeddings, tf.cast(data, tf.int32))
embeddings = tf.reduce_sum(embeddings, 1)
return tf.matmul(embeddings, softmax_weights) + softmax_bias
class OneHotSparseSoftmaxRegression(SoftmaxClassifier):
"""Builds a sparse input softmax regression problem.
This is identical to SparseSoftmaxRegression, but without using embedding
ops.
"""
def __init__(self,
n_features,
n_classes,
activation=tf.identity,
random_seed=None,
noise_stdev=0.0):
self.activation = activation
self.n_features = n_features
self.n_classes = n_classes
param_shapes = [(n_classes, n_features), (n_features, n_classes), (
n_classes,)]
super(OneHotSparseSoftmaxRegression, self).__init__(param_shapes,
random_seed,
noise_stdev)
def inference(self, params, data):
all_embeddings, softmax_weights, softmax_bias = params
num_ids = tf.shape(data)[1]
one_hot_embeddings = tf.one_hot(tf.cast(data, tf.int32), self.n_classes)
one_hot_embeddings = tf.reshape(one_hot_embeddings, [-1, self.n_classes])
embeddings = tf.matmul(one_hot_embeddings, all_embeddings)
embeddings = tf.reshape(embeddings, [-1, num_ids, self.n_features])
embeddings = tf.reduce_sum(embeddings, 1)
return tf.matmul(embeddings, softmax_weights) + softmax_bias
class FullyConnected(SoftmaxClassifier):
"""Builds a multi-layer perceptron classifier."""
def __init__(self, n_features, n_classes, hidden_sizes=(32, 64),
activation=tf.nn.sigmoid, random_seed=None, noise_stdev=0.0):
"""Initializes an multi-layer perceptron classification problem."""
# Store the number of features and activation function.
self.n_features = n_features
self.activation = activation
# Define the network as a list of weight + bias shapes for each layer.
param_shapes = []
for ix, sz in enumerate(hidden_sizes + (n_classes,)):
# The previous layer"s size (n_features if input).
prev_size = n_features if ix == 0 else hidden_sizes[ix - 1]
# Weight shape for this layer.
param_shapes.append((prev_size, sz))
# Bias shape for this layer.
param_shapes.append((sz,))
super(FullyConnected, self).__init__(param_shapes, random_seed, noise_stdev)
def inference(self, params, data):
# Flatten the features into a vector.
features = tf.reshape(data, (-1, self.n_features))
# Pass the data through the network.
preactivations = tf.matmul(features, params[0]) + params[1]
for layer in range(2, len(self.param_shapes), 2):
net = self.activation(preactivations)
preactivations = tf.matmul(net, params[layer]) + params[layer + 1]
return preactivations
def accuracy(self, params, data, labels):
"""Computes the accuracy (fraction of correct classifications).
Args:
params: List of parameter tensors or variables
data: Batch of features with samples along the first dimension
labels: Vector of labels with the same number of samples as the data
Returns:
accuracy: Fraction of correct classifications across the batch
"""
predictions = self.argmax(self.activation(self.inference(params, data)))
return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32))
class ConvNet(SoftmaxClassifier):
"""Builds an N-layer convnet for image classification."""
def __init__(self,
image_shape,
n_classes,
filter_list,
activation=tf.nn.relu,
random_seed=None,
noise_stdev=0.0):
# Number of channels, number of pixels in x- and y- dimensions.
n_channels, px, py = image_shape
# Store the activation.
self.activation = activation
param_shapes = []
input_size = n_channels
for fltr in filter_list:
# Add conv2d filters.
param_shapes.append((fltr[0], fltr[1], input_size, fltr[2]))
input_size = fltr[2]
# Number of units in the final (dense) layer.
self.affine_size = input_size * px * py
param_shapes.append((self.affine_size, n_classes)) # affine weights
param_shapes.append((n_classes,)) # affine bias
super(ConvNet, self).__init__(param_shapes, random_seed, noise_stdev)
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_normal(shape, mean=0., stddev=0.01, seed=seed)
for shape in self.param_shapes]
def inference(self, params, data):
# Unpack.
w_conv_list = params[:-2]
output_w, output_b = params[-2:]
conv_input = data
for w_conv in w_conv_list:
layer = tf.nn.conv2d(conv_input, w_conv, strides=[1] * 4, padding="SAME")
output = self.activation(layer)
conv_input = output
# Flatten.
flattened = tf.reshape(conv_input, (-1, self.affine_size))
# Fully connected layer.
return tf.matmul(flattened, output_w) + output_b
class Bowl(Problem):
"""A 2D quadratic bowl."""
def __init__(self, condition_number, angle=0.0,
random_seed=None, noise_stdev=0.0):
assert condition_number > 0, "Condition number must be positive."
# Define parameter shapes.
param_shapes = [(2, 1)]
super(Bowl, self).__init__(param_shapes, random_seed, noise_stdev)
self.condition_number = condition_number
self.angle = angle
self._build_matrix(condition_number, angle)
def _build_matrix(self, condition_number, angle):
"""Builds the Hessian matrix."""
hessian = np.array([[condition_number, 0.], [0., 1.]], dtype="float32")
# Build the rotation matrix.
rotation_matrix = np.array([
[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]
])
# The objective is 0.5 * || Ax ||_2^2
# where the data matrix (A) is: sqrt(Hessian).dot(rotation_matrix).
self.matrix = np.sqrt(hessian).dot(rotation_matrix)
def objective(self, params, data=None, labels=None):
mtx = tf.constant(self.matrix, dtype=tf.float32)
return tf.nn.l2_loss(tf.matmul(mtx, params[0]))
def surface(self, xlim=5, ylim=5, n=50):
xm, ym = _mesh(xlim, ylim, n)
pts = np.vstack([xm.ravel(), ym.ravel()])
zm = 0.5 * np.linalg.norm(self.matrix.dot(pts), axis=0) ** 2
return xm, ym, zm.reshape(n, n)
class Problem2D(Problem):
def __init__(self, random_seed=None, noise_stdev=0.0):
param_shapes = [(2,)]
super(Problem2D, self).__init__(param_shapes, random_seed, noise_stdev)
def surface(self, n=50, xlim=5, ylim=5):
"""Computes the objective surface over a 2d mesh."""
# Create a mesh over the given coordinate ranges.
xm, ym = _mesh(xlim, ylim, n)
with tf.Graph().as_default(), tf.Session() as sess:
# Ops to compute the objective at every (x, y) point.
x = tf.placeholder(tf.float32, shape=xm.shape)
y = tf.placeholder(tf.float32, shape=ym.shape)
obj = self.objective([[x, y]])
# Run the computation.
zm = sess.run(obj, feed_dict={x: xm, y: ym})
return xm, ym, zm
class Rosenbrock(Problem2D):
"""See https://en.wikipedia.org/wiki/Rosenbrock_function.
  This function has a single global minimum at [1, 1].
The objective value at this point is zero.
"""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-5., maxval=10., seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = (1 - x)**2 + 100 * (y - x**2)**2
return tf.squeeze(obj)
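# --- Quick numeric check of the Rosenbrock objective above (editor addition,
# not part of the original module): the global minimum at (1, 1) evaluates to
# exactly zero, and any other point is positive.
def _check_rosenbrock_minimum():
  def f(x, y):
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
  assert f(1.0, 1.0) == 0.0
  assert f(0.0, 0.0) == 1.0
  return f(1.0, 1.0)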
def make_rosenbrock_loss_and_init(device=None):
"""A variable-backed version of Rosenbrock problem.
See the Rosenbrock class for details.
Args:
device: Where to place the ops of this problem.
Returns:
A tuple of two callables, first of which creates the loss and the second
creates the parameter initializer function.
"""
def make_rosenbrock_loss():
with tf.name_scope("optimizee"):
with tf.device(device):
x = tf.get_variable("x", [1])
y = tf.get_variable("y", [1])
c = tf.get_variable(
"c", [1],
initializer=tf.constant_initializer(100.0),
trainable=False)
obj = (1 - x)**2 + c * (y - x**2)**2
return tf.squeeze(obj)
def make_init_fn(parameters):
with tf.device(device):
init_op = tf.variables_initializer(parameters)
def init_fn(sess):
tf.logging.info("Initializing model parameters.")
sess.run(init_op)
return init_fn
return make_rosenbrock_loss, make_init_fn
class Saddle(Problem2D):
"""Loss surface around a saddle point."""
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = x ** 2 - y ** 2
return tf.squeeze(obj)
class LogSumExp(Problem2D):
"""2D function defined by the log of the sum of exponentials."""
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = tf.log(tf.exp(x + 3. * y - 0.1) +
tf.exp(x - 3. * y - 0.1) +
tf.exp(-x - 0.1) + 1.0)
return tf.squeeze(obj)
class Ackley(Problem2D):
"""Ackley's function (contains many local minima)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-32.768, maxval=32.768, seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = (-20 * tf.exp(-0.2 * tf.sqrt(0.5 * (x ** 2 + y ** 2))) -
tf.exp(0.5 * (tf.cos(2 * np.pi * x) + tf.cos(2 * np.pi * y))) +
tf.exp(1.0) + 20.)
return tf.squeeze(obj)
class Beale(Problem2D):
"""Beale function (a multimodal function with sharp peaks)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-4.5, maxval=4.5, seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = ((1.5 - x + x * y) ** 2 +
(2.25 - x + x * y ** 2) ** 2 +
(2.625 - x + x * y ** 3) ** 2)
return tf.squeeze(obj)
class Booth(Problem2D):
"""Booth's function (has a long valley along one dimension)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-10., maxval=10., seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = (x + 2 * y - 7) ** 2 + (2 * x + y - 5) ** 2
return tf.squeeze(obj)
class StyblinskiTang(Problem2D):
"""Styblinski-Tang function (a bumpy function in two dimensions)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-5., maxval=5., seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
params = tf.split(params[0], 2, axis=0)
obj = 0.5 * tf.reduce_sum([x ** 4 - 16 * x ** 2 + 5 * x
for x in params], 0) + 80.
return tf.squeeze(obj)
class Matyas(Problem2D):
"""Matyas function (a function with a single global minimum in a valley)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=-10, maxval=10, seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
obj = 0.26 * (x ** 2 + y ** 2) - 0.48 * x * y
return tf.squeeze(obj)
class Branin(Problem2D):
"""Branin function (a function with three global minima)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
x1 = tf.random_uniform((1,), minval=-5., maxval=10.,
seed=seed)
x2 = tf.random_uniform((1,), minval=0., maxval=15.,
seed=seed)
return [tf.concat([x1, x2], 0)]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
# Define some constants.
a = 1.
b = 5.1 / (4. * np.pi ** 2)
c = 5 / np.pi
r = 6.
s = 10.
t = 1 / (8. * np.pi)
# Evaluate the function.
obj = a * (y - b * x ** 2 + c * x - r) ** 2 + s * (1 - t) * tf.cos(x) + s
return tf.squeeze(obj)
class Michalewicz(Problem2D):
"""Michalewicz function (has steep ridges and valleys)."""
def init_tensors(self, seed=None):
"""Returns a list of tensors with the given shape."""
return [tf.random_uniform(shape, minval=0., maxval=np.pi, seed=seed)
for shape in self.param_shapes]
def objective(self, params, data=None, labels=None):
x, y = tf.split(params[0], 2, axis=0)
m = 5 # Defines how steep the ridges are (larger m => steeper ridges).
obj = 2. - (tf.sin(x) * tf.sin(x ** 2 / np.pi) ** (2 * m) +
tf.sin(y) * tf.sin(2 * y ** 2 / np.pi) ** (2 * m))
return tf.squeeze(obj)
class Rescale(Problem):
"""Takes an existing problem, and rescales all the parameters."""
def __init__(self, problem_spec, scale=10., noise_stdev=0.0):
self.problem = problem_spec.build()
self.param_shapes = self.problem.param_shapes
self.scale = scale
super(Rescale, self).__init__(self.param_shapes, random_seed=None,
noise_stdev=noise_stdev)
def init_tensors(self, seed=None):
params_raw = self.problem.init_tensors(seed=seed)
params = [t * self.scale for t in params_raw]
return params
def objective(self, params, data=None, labels=None):
params_raw = [t/self.scale for t in params]
problem_obj = self.problem.objective(params_raw, data, labels)
return problem_obj
class SumTask(Problem):
"""Takes a list of problems and modifies the objective to be their sum."""
def __init__(self, problem_specs, noise_stdev=0.0):
self.problems = [ps.build() for ps in problem_specs]
self.param_shapes = []
for prob in self.problems:
self.param_shapes += prob.param_shapes
super(SumTask, self).__init__(self.param_shapes, random_seed=None,
noise_stdev=noise_stdev)
def init_tensors(self, seed=None):
tensors = []
for prob in self.problems:
tensors += prob.init_tensors(seed=seed)
return tensors
def objective(self, params, data=None, labels=None):
obj = 0.
index = 0
for prob in self.problems:
num_params = len(prob.param_shapes)
obj += prob.objective(params[index:index + num_params])
index += num_params
return obj
class IsotropicQuadratic(Problem):
"""An isotropic quadratic problem."""
def objective(self, params, data=None, labels=None):
return sum([tf.reduce_sum(param ** 2) for param in params])
class Norm(Problem):
"""Takes an existing problem and modifies the objective to be its N-norm."""
def __init__(self, ndim, random_seed=None, noise_stdev=0.0, norm_power=2.):
param_shapes = [(ndim, 1)]
super(Norm, self).__init__(param_shapes, random_seed, noise_stdev)
# Generate a random problem instance.
self.w = np.random.randn(ndim, ndim).astype("float32")
self.y = np.random.randn(ndim, 1).astype("float32")
self.norm_power = norm_power
def objective(self, params, data=None, labels=None):
diff = tf.matmul(self.w, params[0]) - self.y
exp = 1. / self.norm_power
loss = tf.reduce_sum((tf.abs(diff) + EPSILON) ** self.norm_power) ** exp
return loss
class LogObjective(Problem):
"""Takes an existing problem and modifies the objective to be its log."""
def __init__(self, problem_spec):
self.problem = problem_spec.build()
self.param_shapes = self.problem.param_shapes
super(LogObjective, self).__init__(self.param_shapes,
random_seed=None,
noise_stdev=0.0)
def objective(self, params, data=None, labels=None):
problem_obj = self.problem.objective(params, data, labels)
return tf.log(problem_obj + EPSILON) - tf.log(EPSILON)
class SparseProblem(Problem):
"""Takes a problem and sets gradients to 0 with the given probability."""
def __init__(self,
problem_spec,
zero_probability=0.99,
random_seed=None,
noise_stdev=0.0):
self.problem = problem_spec.build()
self.param_shapes = self.problem.param_shapes
self.zero_prob = zero_probability
super(SparseProblem, self).__init__(self.param_shapes,
random_seed=random_seed,
noise_stdev=noise_stdev)
def objective(self, parameters, data=None, labels=None):
return self.problem.objective(parameters, data, labels)
def gradients(self, objective, parameters):
grads = tf.gradients(objective, list(parameters))
new_grads = []
for grad in grads:
mask = tf.greater(self.zero_prob, tf.random_uniform(grad.get_shape()))
zero_grad = tf.zeros_like(grad, dtype=tf.float32)
noisy_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape())
new_grads.append(tf.where(mask, zero_grad, noisy_grad))
return new_grads
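# ------------------------------------------------------------------------
# Editor's note (not part of the original file): in `gradients` above,
# `mask` is True with probability `zero_prob`, and `tf.where(mask, ...)`
# keeps the zero gradient exactly where the mask is True, so roughly a
# `zero_prob` fraction of every gradient is zeroed. A minimal NumPy
# analogue of that masking step, for illustration only (`_sparsify_grad`
# is a hypothetical helper that reuses the module-level `np` import):
def _sparsify_grad(grad, zero_prob, noise_stdev=0.0, rng=np.random):
  """Zero each entry of `grad` with probability `zero_prob`; add noise elsewhere."""
  mask = rng.uniform(size=grad.shape) < zero_prob   # True with prob. zero_prob
  noisy = grad + noise_stdev * rng.normal(size=grad.shape)
  return np.where(mask, 0.0, noisy)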
class DependencyChain(Problem):
"""A problem in which parameters must be optimized in order.
A sequence of parameters which all need to be brought to 0, but where each
parameter in the sequence can't be brought to 0 until the preceding one
has been. This should take a long time to optimize, with steady
(or accelerating) progress throughout the entire process.
"""
def __init__(self, ndim, random_seed=None, noise_stdev=0.):
param_shapes = [(ndim + 1,)]
self.ndim = ndim
super(DependencyChain, self).__init__(
param_shapes, random_seed, noise_stdev)
def objective(self, params, data=None, labels=None):
terms = params[0][0]**2 + params[0][1:]**2 / (params[0][:-1]**2 + EPSILON)
return tf.reduce_sum(terms)
class MinMaxWell(Problem):
"""Problem with global min when both the min and max (absolute) params are 1.
The gradient for all but two parameters (the min and max) is zero. This
  should therefore encourage the optimizer to behave sensibly even when
  parameters have zero gradients, as is common e.g. for some deep neural nets.
"""
def __init__(self, ndim, random_seed=None, noise_stdev=0.):
param_shapes = [(ndim,)]
self.ndim = ndim
super(MinMaxWell, self).__init__(param_shapes, random_seed, noise_stdev)
def objective(self, params, data=None, labels=None):
params_sqr = params[0]**2
min_sqr = tf.reduce_min(params_sqr)
max_sqr = tf.reduce_max(params_sqr)
epsilon = 1e-12
return max_sqr + 1./min_sqr - 2. + epsilon
class OutwardSnake(Problem):
"""A winding path out to infinity.
Ideal step length stays constant along the entire path.
"""
def __init__(self, ndim, random_seed=None, noise_stdev=0.):
param_shapes = [(ndim,)]
self.ndim = ndim
super(OutwardSnake, self).__init__(param_shapes, random_seed, noise_stdev)
def objective(self, params, data, labels=None):
radius = tf.sqrt(tf.reduce_sum(params[0]**2))
rad_loss = tf.reduce_sum(1. / (radius + 1e-6) * data[:, 0])
sin_dist = params[0][1:] - tf.cos(params[0][:-1]) * np.pi
sin_loss = tf.reduce_sum((sin_dist * data[:, 1:])**2)
return rad_loss + sin_loss
class ProjectionQuadratic(Problem):
"""Dataset consists of different directions to probe. Global min is at 0."""
def __init__(self, ndim, random_seed=None, noise_stdev=0.):
param_shapes = [(1, ndim)]
super(ProjectionQuadratic, self).__init__(
param_shapes, random_seed, noise_stdev)
def objective(self, params, data, labels=None):
return tf.reduce_sum((params[0] * data)**2)
class SumOfQuadratics(Problem):
def __init__(self, ndim, random_seed=None, noise_stdev=0.):
param_shapes = [(1, ndim)]
super(SumOfQuadratics, self).__init__(
param_shapes, random_seed, noise_stdev)
def objective(self, params, data, labels=None):
epsilon = 1e-12
# Assume dataset is designed so that the global minimum is at params=0.
# Subtract loss at params=0, so that global minimum has objective value
# epsilon (added to avoid floating point issues).
return (tf.reduce_sum((params[0] - data)**2) - tf.reduce_sum(data**2) +
epsilon)
class MatMulAlgorithm(Problem):
"""A 6-th order polynomial optimization problem.
This problem is parametrized by n and k. A solution to this problem with
objective value exactly zero defines a matrix multiplication algorithm of
n x n matrices using k multiplications between matrices. When applied
recursively, such an algorithm has complexity O(n^(log_n(k))).
Given n, it is not known in general which values of k in [n^2, n^3] have a
solution. There is always a solution with k = n^3 (this is the naive
algorithm).
In the special case n = 2, it is known that there are solutions for k = {7, 8}
but not for k <= 6. For n = 3, it is known that there are exact solutions for
23 <= k <= 27, and there are asymptotic solutions for k = {21, 22}, but the
other cases are unknown.
For a given n and k, if one solution exists then infinitely many solutions
exist due to permutation and scaling symmetries in the parameters.
This is a very hard problem for some values of n and k (e.g. n = 3, k = 21),
but very easy for other values (e.g. n = 2, k = 7).
For a given n and k, the specific formulation of this problem is as follows.
Let theta_a, theta_b, theta_c be parameter matrices with respective dimensions
[n**2, k], [n**2, k], [k, n**2]. Then for any matrices a, b with shape [n, n],
we can form the matrix c with shape [n, n] via the operation:
((vec(a) * theta_a) .* (vec(b) * theta_b)) * theta_c = vec(c), (#)
where vec(x) is the operator that flattens a matrix with shape [n, n] into a
row vector with shape [1, n**2], * denotes matrix multiplication and .*
denotes elementwise multiplication.
This operation, parameterized by theta_a, theta_b, theta_c, is a matrix
multiplication algorithm iff c = a*b for all [n, n] matrices a and b. But
actually it suffices to verify all combinations of one-hot matrices a and b,
of which there are n**4 such combinations. This gives a batch of n**4 matrix
triplets (a, b, c) such that equation (#) must hold for each triplet. We solve
for theta_a, theta_b, theta_c by minimizing the sum of squares of errors
across this batch.
Finally, theta_c can be computed from theta_a and theta_b. Therefore it
suffices to learn theta_a and theta_b, from which theta_c and therefore the
objective value can be computed.
"""
def __init__(self, n, k):
assert isinstance(n, int), "n must be an integer"
assert isinstance(k, int), "k must be an integer"
assert n >= 2, "Must have n >= 2"
assert k >= n**2 and k <= n**3, "Must have n**2 <= k <= n**3"
param_shapes = [(n**2, k), (n**2, k)] # theta_a, theta_b
super(MatMulAlgorithm, self).__init__(
param_shapes, random_seed=None, noise_stdev=0.0)
self.n = n
self.k = k
# Build a batch of all combinations of one-hot matrices a, b, and their
# respective products c. Correctness on this batch is a necessary and
# sufficient condition for the algorithm to be valid. The number of matrices
# in {a, b, c}_3d is n**4 and each matrix is n x n.
onehots = np.identity(n**2).reshape(n**2, n, n)
a_3d = np.repeat(onehots, n**2, axis=0)
b_3d = np.tile(onehots, [n**2, 1, 1])
c_3d = np.matmul(a_3d, b_3d)
# Convert the batch to 2D Tensors.
self.a = tf.constant(a_3d.reshape(n**4, n**2), tf.float32, name="a")
self.b = tf.constant(b_3d.reshape(n**4, n**2), tf.float32, name="b")
self.c = tf.constant(c_3d.reshape(n**4, n**2), tf.float32, name="c")
def init_tensors(self, seed=None):
# Initialize params such that the columns of theta_a and theta_b have L2
# norm 1.
def _param_initializer(shape, seed=None):
x = tf.random_normal(shape, dtype=tf.float32, seed=seed)
return tf.transpose(tf.nn.l2_normalize(tf.transpose(x), 1))
return [_param_initializer(shape, seed) for shape in self.param_shapes]
def objective(self, parameters, data=None, labels=None):
theta_a = parameters[0]
theta_b = parameters[1]
# Compute theta_c from theta_a and theta_b.
p = tf.matmul(self.a, theta_a) * tf.matmul(self.b, theta_b)
p_trans = tf.transpose(p, name="p_trans")
p_inv = tf.matmul(
tf.matrix_inverse(tf.matmul(p_trans, p)), p_trans, name="p_inv")
theta_c = tf.matmul(p_inv, self.c, name="theta_c")
# Compute the "predicted" value of c.
c_hat = tf.matmul(p, theta_c, name="c_hat")
# Compute the loss (sum of squared errors).
loss = tf.reduce_sum((c_hat - self.c)**2, name="loss")
return loss
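# ------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a NumPy check that the
# naive k = n**3 construction described in the MatMulAlgorithm docstring
# satisfies equation (#), i.e.
#   ((vec(a) * theta_a) .* (vec(b) * theta_b)) * theta_c == vec(a * b).
# The helper names are illustrative only; they reuse the module-level
# `np` (NumPy) import and are not called anywhere in this module.
def _naive_matmul_thetas(n):
  """Build theta_a, theta_b, theta_c for the trivial k = n**3 algorithm."""
  k = n ** 3
  theta_a = np.zeros((n ** 2, k))
  theta_b = np.zeros((n ** 2, k))
  theta_c = np.zeros((k, n ** 2))
  m = 0
  for i in range(n):        # row of a (and of c)
    for l in range(n):      # contraction index
      for j in range(n):    # column of b (and of c)
        theta_a[i * n + l, m] = 1.   # multiplication m reads a[i, l]
        theta_b[l * n + j, m] = 1.   # ... and b[l, j]
        theta_c[m, i * n + j] = 1.   # ... and contributes to c[i, j]
        m += 1
  return theta_a, theta_b, theta_c

def _check_naive_matmul(n=2, seed=0):
  """Return True iff equation (#) reproduces a.dot(b) for random a, b."""
  rng = np.random.RandomState(seed)
  a, b = rng.randn(n, n), rng.randn(n, n)
  theta_a, theta_b, theta_c = _naive_matmul_thetas(n)
  p = a.reshape(1, -1).dot(theta_a) * b.reshape(1, -1).dot(theta_b)
  c_hat = p.dot(theta_c).reshape(n, n)
  return np.allclose(c_hat, a.dot(b))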
def matmul_problem_sequence(n, k_min, k_max):
"""Helper to generate a sequence of matrix multiplication problems."""
return [(_Spec(MatMulAlgorithm, (n, k), {}), None, None)
for k in range(k_min, k_max + 1)]
def init_fixed_variables(arrays):
with tf.variable_scope(PARAMETER_SCOPE):
params = [tf.Variable(arr.astype("float32")) for arr in arrays]
return params
def _mesh(xlim, ylim, n):
"""Creates a 2D meshgrid covering the given ranges.
Args:
xlim: int that defines the desired x-range (-xlim, xlim)
ylim: int that defines the desired y-range (-ylim, ylim)
n: number of points in each dimension of the mesh
Returns:
xm: 2D array of x-values in the mesh
ym: 2D array of y-values in the mesh
"""
return np.meshgrid(np.linspace(-xlim, xlim, n),
np.linspace(-ylim, ylim, n))
|
jiaphuan/models
|
research/learned_optimizer/problems/problem_generator.py
|
Python
|
apache-2.0
| 35,915
|
[
"Gaussian"
] |
213f4f1b80e9216a3c325d788ff9d81edec7d0e390eac72edb106416171fcb13
|
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.mul import _keep_coeff
from sympy.core.symbol import Symbol
from sympy.core.basic import preorder_traversal
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.core.compatibility import as_int, SYMPY_INTS
from sympy.logic.boolalg import BooleanAtom
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group, sift, public
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable, range
@public
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
Don't mess up with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set([])
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
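    # Editor's note (illustrative, not part of the original sympy source):
    # for a univariate polynomial the list holds the dense coefficients with
    # the highest degree first, e.g. Poly(x**2 + 2*x + 3, x).as_list() gives
    # [1, 2, 3]; multivariate polynomials return nested lists of coefficients.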
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
        Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
        Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
        Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
        A homogeneous polynomial is a polynomial all of whose monomials with
        non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
raise OperationNotSupported(f, 'homogeneous_order')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
        A homogeneous polynomial is a polynomial all of whose monomials with
        non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
Returns the coefficient of ``monom`` in ``f`` if there, else None.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*sqrt(x), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
        # differences and choose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.has_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
_eval_diff = diff
def eval(self, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
f = self
if a is None:
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
        Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""Compute ``f**(-1)`` mod ``x**n``. """
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(self, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def sturm(self, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the algebraic extension of the ground domain.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
References
==========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using New Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
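# With ``sqf=True`` every isolated root is simple, so intervals are returned
# as bare endpoint pairs; otherwise each interval also carries the
# multiplicity ``k`` of the root it isolates. Complex roots (``all=True``)
# are reported as rectangles given by two opposite corners.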
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in the ``[inf, sup]`` interval.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
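# Purely real bounds select the real-root counter; if either bound has an
# imaginary part, both bounds are promoted to corners of a rectangle in the
# complex plane and the complex-root counter is used instead.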
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
CRootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[CRootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[CRootOf(x**3 + x + 1, 0),
CRootOf(x**3 + x + 1, 1),
CRootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
If the accuracy ``n`` cannot be reached in ``maxsteps``, an exception is
raised; rerun with a higher ``maxsteps`` value.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
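# Coefficients from any other domain are evaluated numerically and split
# into (real, imag) pairs so they can be handed to mpmath below.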
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = mpmath.mp.dps
mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, r.imag))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
mpmath.mp.dps = dps
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial whose roots are the n-th powers of the roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
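# The resultant of f(x) and x**n - t with respect to x is a polynomial in t
# whose roots are exactly the n-th powers of the roots of f; renaming t back
# to x gives the requested polynomial. For instance, with f = x**2 - 2 and
# n = 2 this yields (x - 2)**2.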
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
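# Without ``include`` the numeric content is returned separately as the
# ratio cp/cq in front of the cancelled polynomials; with ``include`` that
# content stays absorbed in the returned polynomials.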
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you want not
only to check whether a polynomial is homogeneous but also to compute
its homogeneous order, use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f.__eq__(g)
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
set([y])
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
try:
rep, opt = _dict_from_expr(expr, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
from sympy.functions.elementary.piecewise import Piecewise
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
try:
reps, opt = _parallel_dict_from_expr(exprs, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
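# All coefficients are pooled into a single list so that one common domain
# can be constructed for every expression at once; ``lengths`` records how
# many coefficients belong to each polynomial so the pool can be split back.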
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree', 1, exc)
return sympify(F.degree(opt.gen))
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S
>>> from sympy.core.numbers import mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the ``mod_inverse`` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.numbers.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
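# Fast path: if no generators or flags were given and the whole sequence
# lives in a numerical domain, compute the GCD directly on the ground
# elements instead of building Poly instances.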
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring as long as not all coefficients are fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
if isinstance(f, Equality):
return f
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
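# ``J`` holds, for each generator, the lowest exponent occurring in ``f``;
# the corresponding monomial is rebuilt as ``term`` below and factored out
# together with the numeric content ``coeff``.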
if opt.domain.has_Ring:
if opt.domain.has_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.has_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
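# Background note (standard Sturm theory; a sketch, not tied to a doctest): for
# a square-free ``f``, the number of distinct real roots in a half-open
# interval (a, b] equals the number of sign changes in the Sturm sequence
# evaluated at ``a`` minus the number of sign changes evaluated at ``b``.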
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Examples
========
>>> from sympy import gff_list, ff
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> gff_list(f)
[(x, 1), (x + 2, 4)]
>>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
True
>>> f = x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x
>>> gff_list(f)
[(x**3 + 7, 2), (x**2 + 5*x, 3)]
>>> ff(x**3 + 7, 2)*ff(x**2 + 5*x, 3) == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
    Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
for i in Mul.make_args(expr)]
for arg in args:
if arg.is_Number:
coeff *= arg
continue
if arg.is_Mul:
args.extend(arg.args)
continue
if arg.is_Pow:
base, exp = arg.args
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
"""
    Try to transform a polynomial so that it has rational coefficients.
    First try a rescaling ``x = alpha*y`` such that
    ``f(x) = lc*alpha**n * g(y)``, where ``g`` is a polynomial with
    rational coefficients and ``lc`` is the leading coefficient.
    If this fails, try a translation ``x = y + beta`` such that
    ``f(x) = g(y)``.
    Returns ``None`` if no such ``g`` is found; otherwise returns
    ``(lc, alpha, None, g)`` in the rescaling case and
    ``(None, None, beta, g)`` in the translation case.
Notes
=====
    Currently it handles only polynomials whose coefficients contain
    square roots and no radicals of higher index.
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), -2*sqrt(2) + 2)
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f, f1=None):
"""
try rescaling ``x -> alpha*x`` to convert f to a polynomial
with rational coefficients.
        Returns ``lc, alpha, f`` if the rescaling is successful, where
        ``alpha`` is the rescaling factor and ``f`` is the rescaled
        polynomial; otherwise returns ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            return None
n = f.degree()
lc = f.LC()
        f1 = f1 or f.monic()  # fall back to the monic form of f when f1 is not given
coeffs = f1.all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2]:
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f, f1=None):
"""
try translating ``x -> x + alpha`` to convert f to a polynomial
with rational coefficients.
        Returns ``alpha, f`` if the translation is successful, where
        ``alpha`` is the translation and ``f`` is the shifted polynomial;
        otherwise returns ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            return None
n = f.degree()
        f1 = f1 or f.monic()  # fall back to the monic form of f when f1 is not given
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
sifted = sift(args, lambda z: z.is_rational)
c1, c2 = sifted[True], sifted[False]
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
        Return True if the coefficients of ``p`` contain square roots and
        no radicals of higher index
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for b, wx in f.items() if
b.is_number and wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
f1 = f.monic()
r = _try_rescale(f, f1)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f, f1)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
helper function to factor polynomial using to_rational_coeffs
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
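# Relationship sketch (assuming the docstring example above): multiplying the
# factors returned by ``sqf_list`` reproduces the input, which is what ``sqf``
# below returns in factored form:
#
#     >>> from sympy import Mul
#     >>> c, parts = sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
#     >>> Mul(c, *[b**e for b, e in parts]).expand() == 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
#     True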
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
    There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
    over another domain, e.g. an algebraic or finite field, use the appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
    Return the number of roots of ``f`` in the interval ``[inf, sup]``.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
"""
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.piecewise import Piecewise
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
sifted = sift(f.args, lambda x: x.is_commutative is True and not x.has(Piecewise))
c, nc = sifted[True], sifted[False]
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func._from_args(c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
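# Note (a sketch, not a doctest): when ``f`` is given as a ``(numer, denom)``
# pair instead of a single expression, ``cancel`` returns a triple
# ``(c, P, Q)`` such that ``c*P/Q`` is the cancelled fraction; for the
# docstring inputs above this is mathematically the same as
# (2*x + 2)/(x - 1), only with the numeric content split off into ``c``.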
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
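# Verification sketch for the docstring example above (the defining property of
# the reduction): the quotients and remainder reassemble ``f``:
#
#     >>> from sympy import expand
#     >>> Q, r = reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
#     >>> expand(sum(q*g for q, g in zip(Q, [x**3 - x, y**3 - y])) + r)
#     2*x**4 - x**2 + y**3 + y**2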
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of `solve_poly_system()`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used.
    The algorithm can be set using the ``method`` flag or with the :func:`setup`
function from :mod:`sympy.polys.polyconfig`:
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
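# Worked sketch (hand-checked, not a library doctest): for the system used in
# the ``groebner`` docstring above, ``F = [x*y - 2*y, 2*y**2 - x**2]``, the lex
# basis has leading monomials x**2, x*y and y**3, so only finitely many
# monomials (1, x, y, y**2) escape them and the ideal is zero-dimensional;
# ``is_zero_dimensional(F, x, y)`` should therefore return True.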
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
polys[i] = ring.from_dict(poly.rep.to_dict())
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
return (Tuple(*self._basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.has_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
        such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
        Check if ``poly`` belongs to the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
|
Arafatk/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 173,160
|
[
"Gaussian"
] |
152bfa632eb4a920dc148a59682a28ebb0fdc8c2ef8a5998aead6630a21a8366
|
# coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
bscott.murphy@gmail.com
Summary
-------
Contains class OrdinaryKriging3D.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
from . import variogram_models
from . import core
from .core import (
_adjust_for_anisotropy,
_initialize_variogram_model,
_make_variogram_parameter_list,
_find_statistics,
P_INV,
)
import warnings
class OrdinaryKriging3D:
"""Three-dimensional ordinary kriging.
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Z-coordinates of data points.
val : array_like
Values at data points.
variogram_model : str or GSTools CovModel, optional
        Specifies which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only technically
correct for one-dimensional problems.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
# linear
{'slope': slope, 'nugget': nugget}
# power
{'scale': scale, 'exponent': exponent, 'nugget': nugget}
# gaussian, spherical, exponential and hole-effect:
{'sill': s, 'range': r, 'nugget': n}
# OR
{'psill': p, 'range': r, 'nugget': n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
# linear
[slope, nugget]
# power
[scale, exponent, nugget]
# gaussian, spherical, exponential and hole-effect:
[sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model;
second, the distances at which to calculate the variogram model.
The list provided in variogram_parameters will be passed to the
function as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_angle_x : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_y : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_z : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
verbose : bool, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : bool, optional
Enables plotting to display variogram. Default is False (off).
exact_values : bool, optional
If True, interpolation provides input values at input locations.
If False, interpolation accounts for variance/nugget within input
values at input locations and does not behave as an
exact-interpolator [2]. Note that this only has an effect if
there is variance/nugget present within the input data since it is
interpreted as measurement error. If the nugget is zero, the kriged
field will behave as an exact interpolator.
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
pseudo_inv_type : :class:`str`, optional
Here you can select the algorithm to compute the pseudo-inverse matrix:
* `"pinv"`: use `pinv` from `scipy` which uses `lstsq`
* `"pinv2"`: use `pinv2` from `scipy` which uses `SVD`
* `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values
Default: `"pinv"`
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
"""
eps = 1.0e-10 # Cutoff for comparison to zero
variogram_dict = {
"linear": variogram_models.linear_variogram_model,
"power": variogram_models.power_variogram_model,
"gaussian": variogram_models.gaussian_variogram_model,
"spherical": variogram_models.spherical_variogram_model,
"exponential": variogram_models.exponential_variogram_model,
"hole-effect": variogram_models.hole_effect_variogram_model,
}
def __init__(
self,
x,
y,
z,
val,
variogram_model="linear",
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
verbose=False,
enable_plotting=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
):
# config the pseudo inverse
self.pseudo_inv = bool(pseudo_inv)
self.pseudo_inv_type = str(pseudo_inv_type)
if self.pseudo_inv_type not in P_INV:
raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type))
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
if not isinstance(exact_values, bool):
raise ValueError("exact_values has to be boolean True or False")
self.exact_values = exact_values
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
if self.model.dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(
np.squeeze(np.array(x, copy=True, dtype=np.float64))
)
self.Y_ORIG = np.atleast_1d(
np.squeeze(np.array(y, copy=True, dtype=np.float64))
)
self.Z_ORIG = np.atleast_1d(
np.squeeze(np.array(z, copy=True, dtype=np.float64))
)
self.VALUES = np.atleast_1d(
np.squeeze(np.array(val, copy=True, dtype=np.float64))
)
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG)) / 2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def update_variogram_model(
self,
variogram_model,
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
):
"""Changes the variogram model and variogram parameters for
the kriging system.
Parameters
----------
variogram_model : str or GSTools CovModel
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating
variogram model. See above for more information. True indicates
that weights will be applied. Default is False.
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in y-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in z-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_angle_x : float, optional
Angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_y : float, optional
Angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_z : float, optional
Angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
"""
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
if self.model.dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if (
anisotropy_scaling_y != self.anisotropy_scaling_y
or anisotropy_scaling_z != self.anisotropy_scaling_z
or anisotropy_angle_x != self.anisotropy_angle_x
or anisotropy_angle_y != self.anisotropy_angle_y
or anisotropy_angle_z != self.anisotropy_angle_z
):
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[
self.anisotropy_angle_x,
self.anisotropy_angle_y,
self.anisotropy_angle_z,
],
).T
if self.verbose:
print("Updating variogram mode...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, "r*")
ax.plot(
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
"k-",
)
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*")
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
xyz = np.concatenate(
(
self.X_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis],
),
axis=1,
)
d = cdist(xyz, xyz, "euclidean")
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
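    # Structure sketch of the assembled system (gamma(.) is the fitted
    # variogram evaluated on pairwise distances; the row/column of ones carries
    # the unbiasedness constraint and the Lagrange multiplier mu):
    #
    #     [ -gamma(x_i, x_j)   1 ] [ w  ]   [ -gamma(x_i, x*) ]
    #     [        1^T         0 ] [ mu ] = [        1        ]
    #
    # The diagonal is forced to zero so that, together with the zeroed
    # right-hand-side entries in the _exec_* methods, data points are honoured
    # exactly when ``exact_values`` is True.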
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n + 1, 1))
b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n + 1, axis=1)
b = np.ma.array(b, mask=mask_b)
x = np.dot(a_inv, b.reshape((npt, n + 1)).T).reshape((1, n + 1, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
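    # Rough memory estimate for this vectorized path (a sketch, not a
    # guarantee): the right-hand side ``b`` alone is a float64 array of shape
    # (npt, n + 1, 1), i.e. about 8 * npt * (n + 1) bytes, which is roughly
    # 8 GB for one million prediction points against 1000 data points, and the
    # solve allocates arrays of comparable size. Prefer ``backend="loop"`` in
    # ``execute`` when that is too large.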
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[
0
]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = -x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
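    # Note (sketch): this path is used when ``n_closest_points`` is passed to
    # ``execute``. For each prediction point it selects the rows/columns of the
    # full kriging matrix belonging to the nearest data points (plus the
    # unbiasedness row/column) and solves that small dense system with
    # scipy.linalg.solve, which cuts the per-point cost at the price of a
    # window-dependent approximation.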
def execute(
self,
style,
xpoints,
ypoints,
zpoints,
mask=None,
backend="vectorized",
n_closest_points=None,
):
"""Calculates a kriged grid and the associated variance.
        This method performs the main kriging calculation.
Note that currently measurements (i.e., z values) are
considered 'exact'. This means that, when a specified coordinate
for interpolation is exactly the same as one of the data points,
the variogram evaluated at the point is forced to be zero.
Also, the diagonal of the kriging matrix is also always forced
to be zero. In forcing the variogram evaluated at data points
to be zero, we are effectively saying that there is no variance
at that point (no uncertainty, so the value is 'exact').
        The ``exact_values`` flag (see the class parameters) controls this
        behaviour: when it is set to False, the variogram evaluated at data
        points is not forced to zero, so the uncertainty at those points is
        governed by the nugget rather than being treated as 'exact'.
Parameters
----------
style : str
Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays
of x, y, and z coordinates that define a rectangular grid and uses
mask to only evaluate specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
        If style is specified as 'grid' or 'masked', x-coordinates of
LxMxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array-like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
LxMxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints : array-like, shape (L,) or (L, 1)
If style is specified as 'grid' or 'masked', z-coordinates of
LxMxN grid. If style is specified as 'points', z-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask : boolean array, shape (L, M, N), optional
Specifies the points in the rectangular grid defined by xpoints,
ypoints, zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified
as 'masked'. False indicates that the point should not be masked,
so the kriging system will be solved at the point.
True indicates that the point should be masked, so the kriging
system will not be solved at the point.
backend : str, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
n_closest_points : int, optional
For kriging with a moving window, specifies the number of nearby
points to use in the calculation. This can speed up the calculation
for large datasets, but should be used with caution.
As Kitanidis notes, kriging with a moving window can produce
unexpected oddities if the variogram model is not carefully chosen.
Returns
-------
kvalues : ndarray, shape (L, M, N) or (N, 1)
Interpolated values of specified grid or at the specified set
of points. If style was specified as 'masked', kvalues will be a
numpy masked array.
sigmasq : ndarray, shape (L, M, N) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
"""
if self.verbose:
print("Executing Ordinary Kriging...\n")
if style != "grid" and style != "masked" and style != "points":
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n)
if style in ["grid", "masked"]:
if style == "masked":
if mask is None:
raise IOError(
"Must specify boolean masking array when style is 'masked'."
)
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if (
mask.shape[0] == nx
and mask.shape[2] == nz
and mask.shape[1] == ny
):
mask = mask.swapaxes(0, 2)
else:
raise ValueError(
"Mask dimensions do not match specified grid dimensions."
)
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing="ij")
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == "points":
if xpts.size != ypts.size or ypts.size != zpts.size:
raise ValueError(
"xpoints, ypoints, and zpoints must have "
"same dimensions when treated as listing "
"discrete points."
)
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts, ypts, zpts = _adjust_for_anisotropy(
np.vstack((xpts, ypts, zpts)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if style != "masked":
mask = np.zeros(npt, dtype="bool")
xyz_points = np.concatenate(
(zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1
)
xyz_data = np.concatenate(
(
self.Z_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis],
),
axis=1,
)
bd = cdist(xyz_points, xyz_data, "euclidean")
if n_closest_points is not None:
from scipy.spatial import cKDTree
tree = cKDTree(xyz_data)
bd, bd_idx = tree.query(xyz_points, k=n_closest_points, eps=0.0)
if backend == "loop":
kvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx)
else:
raise ValueError(
"Specified backend '{}' not supported "
"for moving window.".format(backend)
)
else:
if backend == "vectorized":
kvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == "loop":
kvalues, sigmasq = self._exec_loop(a, bd, mask)
else:
raise ValueError(
"Specified backend {} is not supported for "
"3D ordinary kriging.".format(backend)
)
if style == "masked":
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ["masked", "grid"]:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq
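# --------------------------------------------------------------------------- #
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the enclosing class is OrdinaryKriging3D, as in
# pykrige.ok3d, constructed from scattered x/y/z/value data, and shows the
# 'grid' style with the memory-friendly 'loop' backend described in the
# docstring above.
if __name__ == "__main__":
    _x = np.random.random(50)
    _y = np.random.random(50)
    _z = np.random.random(50)
    _vals = np.random.random(50)
    _ok3d = OrdinaryKriging3D(_x, _y, _z, _vals, variogram_model="linear")
    _gridx = np.linspace(0.0, 1.0, 10)
    _gridy = np.linspace(0.0, 1.0, 10)
    _gridz = np.linspace(0.0, 1.0, 10)
    # 'loop' solves one kriging system per grid point: slower, less memory.
    _kvals, _ss = _ok3d.execute("grid", _gridx, _gridy, _gridz, backend="loop")
    print(_kvals.shape, _ss.shape)  # both (10, 10, 10)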
|
bsmurphy/PyKrige
|
pykrige/ok3d.py
|
Python
|
bsd-3-clause
| 39,755
|
[
"Gaussian"
] |
cb5d0705963e250af188b718232deb1b8b662ff33ab269fd65965295196e7ae4
|
#!/usr/bin/python
#=============================================================================================
# Test MBAR by performing statistical tests on a set of 1D harmonic oscillators, for which
# the true free energy differences can be computed analytically.
#
# A number of replications of an experiment in which i.i.d. samples are drawn from a set of
# K harmonic oscillators are produced. For each replicate, we estimate the dimensionless free
# energy differences and mean-square displacements (an observable), as well as their uncertainties.
#
# For a 1D harmonic oscillator, the potential is given by
# V(x;K) = (K/2) * (x-x_0)**2
# where K denotes the spring constant.
#
# The equilibrium distribution is given analytically by
# p(x;beta,K) = sqrt[(beta K) / (2 pi)] exp[-beta K (x-x_0)**2 / 2]
# The dimensionless free energy is therefore
# f(beta,K) = - (1/2) * ln[ (2 pi) / (beta K) ]
#
#=============================================================================================
#=============================================================================================
#=============================================================================================
# IMPORTS
#=============================================================================================
import numpy
from pymbar import testsystems, MBAR, confidenceintervals
#=============================================================================================
# PARAMETERS
#=============================================================================================
K_k = numpy.array([25, 16, 9, 4, 1, 1]) # spring constants for each state
O_k = numpy.array([0, 1, 2, 3, 4, 5]) # equilibrium position offsets for each oscillator
N_k = numpy.array([2000, 2000, 2000, 2000, 2000, 0]) # number of samples from each state (can be zero for some states)
beta = 1.0 # inverse temperature for all simulations
nreplicates = 200 # number of replicates of experiment for testing uncertainty estimate
generateplots = True
if (generateplots):
try:
import matplotlib.pyplot as plt
except:
print "Can't import matplotlib, will not produce graphs."
generateplots = False
observe = 'position^2' # the observable, one of 'RMS displacement', 'position', 'position^2', or 'potential energy'
# Seed the random number generator to produce reproducible output.
numpy.random.seed(0)
#=============================================================================================
# MAIN
#=============================================================================================
# Determine number of simulations.
K = numpy.size(N_k)
if numpy.shape(K_k) != numpy.shape(N_k): raise ValueError("K_k and N_k must have same dimensions.")
# Determine maximum number of samples to be drawn for any state.
N_max = numpy.max(N_k)
# Compute widths of sampled distributions.
# For a harmonic oscillator with spring constant K,
# x ~ Normal(x_0, sigma^2), where sigma = 1/sqrt(beta K)
sigma_k = (beta * K_k)**-0.5
print "Gaussian widths:"
print sigma_k
# Compute the absolute dimensionless free energies of each oscillator analytically.
# f = - ln(sqrt((2 pi)/(beta K)) )
print 'Computing dimensionless free energies analytically...'
f_k_analytical = - numpy.log(numpy.sqrt(2 * numpy.pi) * sigma_k )
# Compute true free energy differences.
Deltaf_ij_analytical = numpy.zeros([K,K], dtype = numpy.float64)
for i in range(0,K):
for j in range(0,K):
Deltaf_ij_analytical[i,j] = f_k_analytical[j] - f_k_analytical[i]
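# Hedged sanity check (added illustration, not in the original script): the
# closed-form dimensionless free energy f(beta,K) = -(1/2) ln[(2 pi)/(beta K)]
# should agree with -ln(sqrt(2 pi) sigma) used above, and the pairwise
# differences can equivalently be formed by broadcasting.
assert numpy.allclose(f_k_analytical, -0.5 * numpy.log(2 * numpy.pi / (beta * K_k)))
assert numpy.allclose(Deltaf_ij_analytical, f_k_analytical[numpy.newaxis, :] - f_k_analytical[:, numpy.newaxis])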
# Compute ensemble averages analytically
if observe == 'RMS displacement':
A_k_analytical = sigma_k # root-mean-square displacement
elif observe == 'potential energy':
A_k_analytical = 1/(2*beta)*numpy.ones([K],float) # by equipartition
elif observe == 'position':
A_k_analytical = O_k # observable is the position
elif observe == 'position^2':
A_k_analytical = (1+ beta*K_k*O_k**2)/(beta*K_k) # observable is the position^2
else:
raise "Observable %s not known." % observe
# DEBUG info
print "This script will perform %d replicates of an experiment where samples are drawn from %d harmonic oscillators." % (nreplicates, K)
print "The harmonic oscillators have equilibrium positions"
print O_k
print "and spring constants"
print K_k
print "and the following number of samples will be drawn from each (can be zero if no samples drawn):"
print N_k
print ""
# Conduct a number of replicates of the same experiment
replicates_observable = [] # storage for one hash for each replicate
replicates_standobservable = [] # storage for one hash for each replicate
replicates_df = [] # storage for one hash for each replicate
replicates_fdf = [] # storage for one hash for final observable
for replicate_index in range(0,nreplicates):
print "Performing replicate %d / %d" % (replicate_index+1, nreplicates)
# Initialize a hash to store data for this replicate.
replicate_df = { }
replicate_fdf = { }
replicate_bar = { }
replicate_observable = { }
replicate_standobservable = { }
#=============================================================================================
# Generate independent data samples from K one-dimensional harmonic oscillators centered at q = 0.
#=============================================================================================
randomsample = testsystems.harmonic_oscillators.HarmonicOscillatorsTestCase(O_k=O_k, K_k=K_k, beta=beta)
[x_kn,u_kln,N_k] = randomsample.sample(N_k,mode='u_kln')
# get the unreduced energies
U_kln = u_kln/beta
#=============================================================================================
# Estimate free energies and expectations.
#=============================================================================================
# Initialize the MBAR class, determining the free energies.
mbar = MBAR(u_kln, N_k, relative_tolerance=1.0e-10,verbose=False) # use fast Newton-Raphson solver
(Deltaf_ij_estimated, dDeltaf_ij_estimated, _theta) = mbar.getFreeEnergyDifferences()
# Compute error from analytical free energy differences.
Deltaf_ij_error = Deltaf_ij_estimated - Deltaf_ij_analytical
# Estimate the expectation of the chosen observable at each thermodynamic state.
if observe == 'RMS displacement':
A_kn = numpy.zeros([K,K,N_max], dtype = numpy.float64);
for k in range(0,K):
for l in range(0,K):
A_kn[k,l,0:N_k[k]] = (x_kn[k,0:N_k[k]] - O_k[l])**2 # observable is the squared displacement
# observable is the potential energy, a 3D array since the potential energy is a function of
# thermodynamic state
elif observe == 'potential energy':
A_kn = U_kln
# observable for estimation is the position
elif observe == 'position':
A_kn = numpy.zeros([K,N_max], dtype = numpy.float64)
for k in range(0,K):
A_kn[k,0:N_k[k]] = x_kn[k,0:N_k[k]]
elif observe == 'position^2':
A_kn = numpy.zeros([K,N_max], dtype = numpy.float64)
for k in range(0,K):
A_kn[k,0:N_k[k]] = x_kn[k,0:N_k[k]]**2
(A_k_estimated, dA_k_estimated) = mbar.computeExpectations(A_kn)
As_k_estimated = numpy.zeros([K],numpy.float64)
dAs_k_estimated = numpy.zeros([K],numpy.float64)
# 'standard' expectation averages
ifzero = numpy.array(N_k != 0)
for k in range(K):
if (ifzero[k]):
if (observe == 'position') or (observe == 'position^2'):
As_k_estimated[k] = numpy.average(A_kn[k,0:N_k[k]])
dAs_k_estimated[k] = numpy.sqrt(numpy.var(A_kn[k,0:N_k[k]])/(N_k[k]-1))
elif (observe == 'RMS displacement' ) or (observe == 'potential energy'):
As_k_estimated[k] = numpy.average(A_kn[k,k,0:N_k[k]])
dAs_k_estimated[k] = numpy.sqrt(numpy.var(A_kn[k,k,0:N_k[k]])/(N_k[k]-1))
print A_k_estimated
print dA_k_estimated
# need to additionally transform to get the square root
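# (added note: dividing by 2*A below is standard error propagation for B = sqrt(A): dB = dA / (2*sqrt(A)) = dA / (2*B))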
if observe == 'RMS displacement':
A_k_estimated = numpy.sqrt(A_k_estimated)
As_k_estimated = numpy.sqrt(As_k_estimated)
# Compute error from analytical observable estimate.
dA_k_estimated = dA_k_estimated/(2*A_k_estimated)
dAs_k_estimated = dAs_k_estimated/(2*As_k_estimated)
A_k_error = A_k_estimated - A_k_analytical
As_k_error = As_k_estimated - A_k_analytical
#=============================================================================================
# Store data for this replicate.
#=============================================================================================
replicate_df['estimated'] = Deltaf_ij_estimated.copy()
replicate_df['destimated'] = numpy.asarray(dDeltaf_ij_estimated.copy())
replicate_df['error'] = Deltaf_ij_error.copy()
replicates_df.append(replicate_df)
replicate_observable['estimated'] = A_k_estimated.copy()
replicate_observable['destimated'] = numpy.asarray(dA_k_estimated.copy())
replicate_observable['error'] = A_k_error.copy()
replicates_observable.append(replicate_observable)
replicate_standobservable['estimated'] = As_k_estimated[ifzero].copy()
replicate_standobservable['destimated'] = dAs_k_estimated[ifzero].copy()
replicate_standobservable['error'] = As_k_error[ifzero].copy()
replicates_standobservable.append(replicate_standobservable)
# compute the probability distribution of all states
print "Free energies"
# compute anderson/darling statistics
D = confidenceintervals.AndersonDarling(replicates_df,K)
print "Anderson-Darling Metrics (see README.md)"
print D
if (generateplots):
confidenceintervals.QQPlot(replicates_df,K,title='Q-Q plots of free energy differences',filename="QQdf.pdf")
(alpha_fij,Pobs_fij,Plow_fij,Phigh_fij,dPobs_fij,Pnorm_fij) = confidenceintervals.generateConfidenceIntervals(replicates_df,K)
print "Standard ensemble averaged observables"
(alpha_Ai,Pobs_Ai,Plow_Ai,Phigh_Ai,dPobs_Ai,Pnorm_Ai) = confidenceintervals.generateConfidenceIntervals(replicates_standobservable,numpy.sum(ifzero))
D = confidenceintervals.AndersonDarling(replicates_standobservable,numpy.sum(ifzero))
print "Anderson-Darling Metrics (see README.md)"
print D
if (generateplots):
confidenceintervals.QQPlot(replicates_standobservable,numpy.sum(ifzero),title='Q-Q plots of ensemble averaged observables \n with standard error estimates', filename="QQstandardobserve.pdf")
print "MBAR ensemble averaged observables"
(alpha_Ai,Pobs_Ai,Plow_Ai,Phigh_Ai,dPobs_Ai,Pnorm_Ai) = confidenceintervals.generateConfidenceIntervals(replicates_observable,K)
D = confidenceintervals.AndersonDarling(replicates_observable,K)
print "Anderson-Darling Metrics (see README.md)"
print D
if (generateplots):
confidenceintervals.QQPlot(replicates_observable,K,title='Q-Q plots of ensemble averaged observables using MBAR', filename="QQMBARobserve.pdf")
if (generateplots):
override = {
'family' : 'sans-serif',
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center',
'weight' : 'bold',
'size' : 30
}
formatstrings = ['b-','g-','c-','y-','r-','m-']
if (generateplots):
plt.figure(1);
plt.axis([0.0, 4.0, 0.0, 1.0])
plt.plot(alpha_fij,Pnorm_fij,'k-',label="Normal")
for k in range(1,K):
replicates_fdf = []
for replicate_ij in replicates_df:
replicate = {}
replicate['estimated'] = replicate_ij['estimated'][0,k]
replicate['destimated'] = replicate_ij['destimated'][0,k]
replicate['error'] = replicate_ij['error'][0,k]
replicates_fdf.append(replicate)
# compute the distribution of the end states only
print ""
print " ==== State %d alone with MBAR ===== " %(k)
(alpha_f,Pobs_f,Plow_f,Phigh_f,dPobs_f,Pnorm_f) = confidenceintervals.generateConfidenceIntervals(replicates_fdf,K);
label = 'State %d' % k
if (generateplots):
plt.plot(alpha_f,Pobs_f,formatstrings[k-1],label=label)
if (generateplots):
plt.title('Cumulative Probability vs. Normal Distribution',size=24)
plt.xlabel('Standard Deviations',size = 18)
plt.ylabel('Cumulative Probability',size= 18)
plt.legend(loc=4)
plt.savefig('cumulative_probability_comparison_curves.pdf')
if (generateplots):
plt.figure(2);
plt.axis([0.0, 4.0, 0.0, 1.0])
plt.plot(alpha_fij,Pnorm_fij,'k-',label="Normal")
|
kyleabeauchamp/pymbar
|
examples/harmonic-oscillators/harmonic-oscillators-distributions.py
|
Python
|
lgpl-2.1
| 12,247
|
[
"Gaussian"
] |
77d38c6afea68b095e21a11394d5cd043afc8ddc5d16892c0f19bd517d90de9e
|
"""
SLSClient is a client for the SLS DB that looks up the status of a given service.
"""
import socket
import urllib2
from xml.dom import minidom
from DIRAC import S_OK, S_ERROR
def getAvailabilityStatus( sls_id, timeout = None ):
"""
Return actual SLS availability status of entity in sls_id.
Use SLS API: fast!!
:params:
:attr:`sls_id`: string - sls_id of the service
Returns: { "Availability": <int>, "Weblink": <str> }
"""
socket.setdefaulttimeout( timeout )
try:
res = urllib2.urlopen("http://sls.cern.ch/sls/getServiceAvailability.php?id=" + sls_id).read()
except urllib2.URLError as exc:
return S_ERROR(str(exc))
if "ERROR: Couldn't find service" in res:
return S_ERROR( "The service is not monitored with SLS" )
elif "ERROR:" in res:
return S_ERROR("Unknown SLS error")
else:
return S_OK( { "Availability": int(res), "Weblink": "https://sls.cern.ch/sls/service.php?id=" + sls_id})
def getServiceInfo( sls_id, timeout = None ):
"""
Return actual SLS "additional service information" as a dict.
(Parse SLS update XML)
:params:
:attr:`sls_id` : string - sls_id of the service
"""
socket.setdefaulttimeout( timeout )
try:
sls = urllib2.urlopen("http://sls.cern.ch/sls/update/" + sls_id + '.xml')
doc = minidom.parse( sls )
numericValues = doc.getElementsByTagName( "numericvalue" )
except Exception as exc:
return S_ERROR(str(exc))
return S_OK(dict([(nv.getAttribute("name"), float(nv.firstChild.nodeValue)) for nv in numericValues]))
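# Hedged usage sketch (added for illustration; 'some-service-id' is a
# placeholder, not a real SLS identifier). Both helpers return the usual
# DIRAC S_OK / S_ERROR dictionaries, so callers should check 'OK' before
# reading 'Value'.
if __name__ == "__main__":
  result = getAvailabilityStatus( "some-service-id", timeout = 10 )
  if result['OK']:
    print result['Value']['Availability'], result['Value']['Weblink']
  else:
    print result['Message']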
|
avedaee/DIRAC
|
Core/LCG/SLSClient.py
|
Python
|
gpl-3.0
| 1,532
|
[
"DIRAC"
] |
9369029db2f37534be65d8096896fc2626e596aec952e7891b03594ae5232f1e
|
"""
This pipeline is intended to extract spatial information --- euclidean
distance to the nearest neighbour contour point --- from a standalone file.
"""
import os
import numpy as np
from protoclass.data_management import T2WModality
from protoclass.data_management import GTModality
from protoclass.preprocessing import RicianNormalization
from protoclass.preprocessing import GaussianNormalization
from protoclass.extraction import SpatialExtraction
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_t2w = 'T2W'
# Define the path of the ground truth for the prostate
path_gt = 'GT_inv/prostate'
# Define the label of the ground-truth which will be provided
label_gt = ['prostate']
# Define the path where the information for the gaussian normalization are
path_gaussian = '/data/prostate/pre-processing/mp-mri-prostate/gaussian-t2w'
# Define the path where the information for the rician normalization are
path_rician = '/data/prostate/pre-processing/mp-mri-prostate/rician-t2w'
# Define the path to store the Tofts data
path_store = '/data/prostate/extraction/mp-mri-prostate/spatial-dist-contour'
# ID of the patient for which we need to use the Gaussian Normalization
ID_GAUSSIAN = '387'
# Generate the different path to be later treated
path_patients_list_t2w = []
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the T2W data
path_patients_list_t2w.append(os.path.join(path_patients, id_patient,
path_t2w))
# Append for the GT data - Note that we need a list of gt paths
path_patients_list_gt.append([os.path.join(path_patients, id_patient,
path_gt)])
# List where to store the different minimum
for id_p, (p_t2w, p_gt) in enumerate(zip(path_patients_list_t2w,
path_patients_list_gt)):
print 'Processing {}'.format(id_patient_list[id_p])
# Remove a part of the string to have only the id
nb_patient = id_patient_list[id_p].replace('Patient ', '')
# Read the image data
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(p_t2w)
# Read the GT
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, p_gt)
if not nb_patient == ID_GAUSSIAN:
# Rician Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_rician, pat_chg)
t2w_norm = RicianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
else:
# Gaussian Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_gaussian, pat_chg)
t2w_norm = GaussianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
# Create an object to extract the euclidean distance to the center
spe = SpatialExtraction(t2w_mod, kind='distance', coord_system='euclidean',
reference='nn-contour-point')
# Compute the necessary information
print 'Compute the distances'
spe.fit(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Extract the distances
print 'Extract the distances corresponding to the ROI'
data = spe.transform(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Store the data
print 'Store the matrix'
# Check that the path is existing
if not os.path.exists(path_store):
os.makedirs(path_store)
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_spe.npy'
filename = os.path.join(path_store, pat_chg)
np.save(filename, data)
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-extraction/spatial/pipeline_extraction_distance_contour.py
|
Python
|
mit
| 4,060
|
[
"Gaussian"
] |
935cfc0273578b35a28891d0713971989b0fcf4faa7ace82ede4f5d91ff6d08e
|
from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
_OP_MAP = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Invert: op.neg,
}
# ------------------------------------------------------------------------------- #
class Calc(ast.NodeVisitor):
def visit_BinOp(self, node):
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
# ------------------------------------------------------------------------------- #
def visit_Num(self, node):
return node.n
# ------------------------------------------------------------------------------- #
def visit_Expr(self, node):
return self.visit(node.value)
# ------------------------------------------------------------------------------- #
@classmethod
def doMath(cls, expression):
tree = ast.parse(expression)
calc = cls()
return calc.visit(tree.body[0])
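# ------------------------------------------------------------------------------- #
# Hedged illustration (added, not part of the original module): doMath evaluates
# a plain arithmetic string through the AST visitor above, e.g.
#   Calc.doMath('1+2*3')   -> 7
#   Calc.doMath('10-4/2')  -> 8.0   (ast.Div is mapped to true division)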
# ------------------------------------------------------------------------------- #
class Parentheses(object):
def fix(self, s):
res = []
self.visited = set([s])
self.dfs(s, self.invalid(s), res)
return res
# ------------------------------------------------------------------------------- #
def dfs(self, s, n, res):
if n == 0:
res.append(s)
return
for i in range(len(s)):
if s[i] in ['(', ')']:
s_new = s[:i] + s[i + 1:]
if s_new not in self.visited and self.invalid(s_new) < n:
self.visited.add(s_new)
self.dfs(s_new, self.invalid(s_new), res)
# ------------------------------------------------------------------------------- #
def invalid(self, s):
plus = minus = 0
memo = {"(": 1, ")": -1}
for c in s:
plus += memo.get(c, 0)
minus += 1 if plus < 0 else 0
plus = max(0, plus)
return plus + minus
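# ------------------------------------------------------------------------------- #
# Hedged illustration (added): fix() returns every string obtainable by removing
# the minimum number of unbalanced parentheses, e.g.
#   Parentheses().fix('(()')  -> ['()']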
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('native')
# ------------------------------------------------------------------------------- #
def eval(self, body, domain):
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv
}
# ------------------------------------------------------------------------------- #
def flatten(lists):
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
# ------------------------------------------------------------------------------- #
def jsfuckToNumber(jsFuck):
# "Clean Up" JSFuck
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
jsFuck = Parentheses().fix(jsFuck)[0]
# Hackery Parser for Math
stack = []
bstack = []
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
if i == '+':
stack.append(bstack)
bstack = []
continue
bstack.append(i)
stack.append(bstack)
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
# ------------------------------------------------------------------------------- #
def divisorMath(payload, needle, domain):
jsfuckMath = payload.split('/')
if needle in jsfuckMath[1]:
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
expression_value = operators[expression[1]](
float(jsfuckToNumber(expression[0])),
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
])]))
)
else:
expression_value = jsfuckToNumber(jsfuckMath[1])
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
return expression_value
# ------------------------------------------------------------------------------- #
def challengeSolve(body, domain):
jschl_answer = 0
try:
jsfuckChallenge = re.search(
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
body,
re.DOTALL | re.MULTILINE
).groupdict()
except AttributeError:
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
if kJSFUCK:
try:
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
try:
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
try:
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
kValues = {}
for m in r.finditer(body):
kValues[int(m.group('id'))] = m.group('jsfuck')
jsfuckChallenge['k'] = kValues[kJSFUCK]
except (AttributeError, IndexError):
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
jsfuckChallenge['challenge'] = re.finditer(
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
jsfuckChallenge['variable'],
jsfuckChallenge['variable']
),
jsfuckChallenge['challenge']
)
# ------------------------------------------------------------------------------- #
if '/' in jsfuckChallenge['init']:
val = jsfuckChallenge['init'].split('/')
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
else:
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
# ------------------------------------------------------------------------------- #
for expressionMatch in jsfuckChallenge['challenge']:
oper, expression = expressionMatch.groups()
if '/' in expression:
expression_value = divisorMath(expression, 'function(p)', domain)
else:
if 'Element' in expression:
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
else:
expression_value = jsfuckToNumber(expression)
jschl_answer = operators[oper](jschl_answer, expression_value)
# ------------------------------------------------------------------------------- #
# if not jsfuckChallenge['k'] and '+ t.length' in body:
# jschl_answer += len(domain)
# ------------------------------------------------------------------------------- #
return '{0:.10f}'.format(jschl_answer)
# ------------------------------------------------------------------------------- #
return challengeSolve(body, domain)
# ------------------------------------------------------------------------------- #
ChallengeInterpreter()
|
alfa-addon/addon
|
plugin.video.alfa/lib/cloudscraper/interpreters/native.py
|
Python
|
gpl-3.0
| 8,626
|
[
"VisIt"
] |
953652c4338f30cd48a119c120afaf4beb73bdb64b7020d1edd058464fd461d2
|
"""
Acceptance tests for the certificate web view feature.
"""
from common.test.acceptance.tests.helpers import UniqueCourseTest, EventsTestMixin, load_data_str, get_element_padding
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
from common.test.acceptance.fixtures.certificates import CertificateConfigFixture
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.certificate_page import CertificatePage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.progress import ProgressPage
@attr(shard=5)
class CertificateWebViewTest(EventsTestMixin, UniqueCourseTest):
"""
Tests for verifying certificate web view features
"""
def setUp(self):
super(CertificateWebViewTest, self).setUp()
# set same course number as we have in fixture json
self.course_info['number'] = "335535897951379478207964576572017930000"
test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
course_settings = {'certificates': test_certificate_config}
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
settings=course_settings
)
self.course_fixture.add_advanced_settings({
"cert_html_view_enabled": {"value": "true"}
})
self.course_fixture.install()
self.user_id = "99" # we have created a user with this id in fixture
self.cert_fixture = CertificateConfigFixture(self.course_id, test_certificate_config)
# Load certificate web view page for use by the tests
self.certificate_page = CertificatePage(self.browser, self.user_id, self.course_id)
def log_in_as_unique_user(self):
"""
Log in as a valid lms user.
"""
AutoAuthPage(
self.browser,
username="testcert",
email="cert@example.com",
password="testuser",
course_id=self.course_id
).visit()
def test_page_has_accomplishments_banner(self):
"""
Scenario: User accomplishment banner should be present if logged in user is the one who is awarded
the certificate
Given there is a course with certificate configuration
And I have passed the course and certificate is generated
When I view the certificate web view page
Then I should see the accomplishment banner. The banner should have LinkedIn and Facebook share buttons
And when I click on the `Add to Profile` button, an `edx.certificate.shared` event should be emitted
"""
self.cert_fixture.install()
self.log_in_as_unique_user()
self.certificate_page.visit()
self.assertTrue(self.certificate_page.accomplishment_banner.visible)
self.assertTrue(self.certificate_page.add_to_linkedin_profile_button.visible)
self.assertTrue(self.certificate_page.add_to_facebook_profile_button.visible)
self.certificate_page.add_to_linkedin_profile_button.click()
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.certificate.shared'},
number_of_matches=1
)
expected_events = [
{
'event': {
'user_id': self.user_id,
'course_id': self.course_id
}
}
]
self.assert_events_match(expected_events, actual_events)
@attr(shard=5)
class CertificateProgressPageTest(UniqueCourseTest):
"""
Tests for verifying Certificate info on Progress tab of course page.
"""
def setUp(self):
super(CertificateProgressPageTest, self).setUp()
# set same course number as we have in fixture json
self.course_info['number'] = "3355358979513794782079645765720179311111"
test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
course_settings = {'certificates': test_certificate_config}
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
settings=course_settings
)
self.course_fixture.add_advanced_settings({
"cert_html_view_enabled": {"value": "true"}
})
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection', grader_type='Final Exam').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2', grader_type='Midterm Exam').add_children(
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
)
)
)
self.course_fixture.install()
self.user_id = "99" # we have created a user with this id in fixture
self.cert_fixture = CertificateConfigFixture(self.course_id, test_certificate_config)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
def log_in_as_unique_user(self):
"""
Log in as a valid lms user.
"""
AutoAuthPage(
self.browser,
username="testprogress",
email="progress@example.com",
password="testuser",
course_id=self.course_id
).visit()
def test_progress_page_has_view_certificate_button(self):
"""
Scenario: View Certificate option should be present on Course Progress menu if the user is
awarded a certificate.
And there should be no padding around the box containing certificate info. (See SOL-1196 for details on this)
As a Student
Given there is a course with certificate configuration
And I have passed the course and certificate is generated
When I go on the Progress tab for the course
Then I should see a 'View Certificate' button
And there should be no padding around the certificate info box.
"""
self.cert_fixture.install()
self.log_in_as_unique_user()
self.complete_course_problems()
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
self.assertTrue(self.progress_page.q(css='.auto-cert-message').first.visible)
actual_padding = get_element_padding(self.progress_page, '.wrapper-msg.wrapper-auto-cert')
actual_padding = [int(padding) for padding in actual_padding.itervalues()]
expected_padding = [0, 0, 0, 0]
# Verify that there is no padding around the box containing certificate info.
self.assertEqual(actual_padding, expected_padding)
def complete_course_problems(self):
"""
Complete Course Problems.
Problems were added in the setUp
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
# Navigate to the 'Test Subsection' of 'Test Section'
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Navigate to Test Problem 1
self.course_nav.go_to_vertical('Test Problem 1')
# Select correct value for from select menu
self.course_nav.q(css='select option[value="{}"]'.format('blue')).first.click()
# Select correct radio button for the answer
self.course_nav.q(css='fieldset div.field:nth-child(4) input').nth(0).click()
# Select correct radio buttons for the answer
self.course_nav.q(css='fieldset div.field:nth-child(2) input').nth(1).click()
self.course_nav.q(css='fieldset div.field:nth-child(4) input').nth(1).click()
# Submit the answer
self.course_nav.q(css='button.submit').click()
self.course_nav.wait_for_ajax()
# Navigate to the 'Test Subsection 2' of 'Test Section 2'
self.course_nav.go_to_section('Test Section 2', 'Test Subsection 2')
# Navigate to Test Problem 2
self.course_nav.go_to_vertical('Test Problem 2')
# Fill in the answer of the problem
self.course_nav.q(css='input[id^=input_][id$=_2_1]').fill('A*x^2 + sqrt(y)')
# Submit the answer
self.course_nav.q(css='button.submit').click()
self.course_nav.wait_for_ajax()
|
TheMOOCAgency/edx-platform
|
common/test/acceptance/tests/lms/test_certificate_web_view.py
|
Python
|
agpl-3.0
| 9,797
|
[
"VisIt"
] |
7b13851ede172650d85e70552582e0cbc761510596ad155589ab8051b91c611b
|
# -*- coding: utf-8 -*-
#
# Maximum Temperature Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Tools.HardwareInfo import HardwareInfo
from enigma import eLabel
from Renderer import Renderer
from os import popen
class DMCHDMaxTemp(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
if "8000" in HardwareInfo().get_device_name() or "800se" in HardwareInfo().get_device_name() or "500" in HardwareInfo().get_device_name():
self.ZeigeTemp = True
else:
self.ZeigeTemp = False
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
if self.ZeigeTemp:
maxtemp = 0
try:
templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
tempcount = len(templist)
for count in range(tempcount):
id = templist[count]
tt = sensors.getSensorValue(id)
if tt > maxtemp:
maxtemp = tt
except:
pass
self.text = str(maxtemp) + "°C"
else:
loada = 0
try:
out_line = popen("cat /proc/loadavg").readline()
loada = out_line[:4]
except:
pass
self.text = loada
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
|
sklnet/opendroid-enigma2
|
lib/python/Components/Renderer/DMCHDMaxTemp.py
|
Python
|
gpl-2.0
| 2,083
|
[
"VisIt"
] |
02040f3be5e9012472d80eb2d22c0343a7ef7f02a1809d3663e8a957c4021194
|
#! test fragment decomposition + to/from_dict
import numpy as np
import psi4
from psi4.driver import qcdb
def test_chgmult(expected, cgmpdict, label):
rc, rfc, rm, rfm = expected
qcdb.compare_integers(rc, cgmpdict['molecular_charge'], label + ': c')
qcdb.compare_integers(rm, cgmpdict['molecular_multiplicity'], label + ': m')
qcdb.compare_integers(True, np.allclose(cgmpdict['fragment_charges'], rfc), label + ': fc')
qcdb.compare_integers(True, np.allclose(cgmpdict['fragment_multiplicities'], rfm), label + ': fm')
def test_dimer(mol, expected_cgmp, label, mtype):
mol.update_geometry()
dAB = mol.to_dict()
test_chgmult(expected_cgmp['AB'], dAB, label + ' AB')
mAB = mtype.from_dict(dAB)
qcdb.compare_molrecs(dAB, mAB.to_dict(), 6, label + ' AB roundtrip')
aB = mol.extract_subsets(2, 1)
daB = aB.to_dict()
test_chgmult(expected_cgmp['aB'], daB, label + ' aB')
maB = mtype.from_dict(daB)
qcdb.compare_molrecs(daB, maB.to_dict(), 6, label + ' aB roundtrip')
Ab = mol.extract_subsets(1, 2)
dAb = Ab.to_dict()
test_chgmult(expected_cgmp['Ab'], dAb, label + ' Ab')
mAb = mtype.from_dict(dAb)
qcdb.compare_molrecs(dAb, mAb.to_dict(), 6, label + ' Ab roundtrip')
A_ = mol.extract_subsets(1)
dA_ = A_.to_dict()
test_chgmult(expected_cgmp['A_'], dA_, label + ' A_')
mA_ = mtype.from_dict(dA_)
qcdb.compare_molrecs(dA_, mA_.to_dict(), 6, label + ' A_ roundtrip')
_B = mol.extract_subsets(2)
d_B = _B.to_dict()
test_chgmult(expected_cgmp['_B'], d_B, label + ' _B')
m_B = mtype.from_dict(d_B)
qcdb.compare_molrecs(d_B, m_B.to_dict(), 6, label + ' _B roundtrip')
qcdb.compare_integers(True, type(mol) == mtype, label + ': AB type')
qcdb.compare_integers(True, type(Ab) == mtype, label + ': Ab type')
eneyne = """
C 0.000000 -0.667578 -2.124659
C 0.000000 0.667578 -2.124659
H 0.923621 -1.232253 -2.126185
H -0.923621 -1.232253 -2.126185
H -0.923621 1.232253 -2.126185
H 0.923621 1.232253 -2.126185
--
C 0.000000 0.000000 2.900503
C 0.000000 0.000000 1.693240
H 0.000000 0.000000 0.627352
H 0.000000 0.000000 3.963929
"""
eneyne_cgmp = {
'AB': (0, [0, 0], 1, [1, 1]),
'aB': (0, [0, 0], 1, [1, 1]),
'Ab': (0, [0, 0], 1, [1, 1]),
'A_': (0, [0], 1, [1]),
'_B': (0, [0], 1, [1]),
}
negpos = """
-1 1
O 0.0 0.0 0.0
H 0.0 0.0 1.0
--
1 1
O 2.0 2.0 2.0
H 3.0 2.0 2.0
H 2.0 3.0 2.0
H 2.0 2.0 3.0
"""
negpos_cgmp = {
'AB': (0, [-1, 1], 1, [1, 1]),
'A_': (-1, [-1], 1, [1]),
'_B': (1, [1], 1, [1]),
'Ab': (-1, [-1, 0], 1, [1, 1]),
'aB': (1, [0, 1], 1, [1, 1]),
}
qeneyne = qcdb.Molecule(eneyne)
peneyne = psi4.geometry(eneyne)
qnegpos = qcdb.Molecule(negpos)
pnegpos = psi4.geometry(negpos)
test_dimer(qeneyne, eneyne_cgmp, 'Q: eneyne', qcdb.Molecule)
test_dimer(peneyne, eneyne_cgmp, 'P: eneyne', psi4.core.Molecule)
test_dimer(qnegpos, negpos_cgmp, 'Q: negpos', qcdb.Molecule)
test_dimer(pnegpos, negpos_cgmp, 'P: negpos', psi4.core.Molecule)
# Once user starts messing with cgmp other than in construction, user has
# no way to mess with fragment cgmp, and Psi/QCDB Molecule classes don't do
# much to set things in order. Upon to_dict, things get sorted into some
# physical reality, but fragment charges in a complicated system like this
# won't get sorted out to resemble their initial state (could do more
# try/catch, but that's really the class's job). So really all that can be
# tested is the main dimer's total charge and total multiplicity.
qnegpos.set_multiplicity(3)
qnegpos.set_molecular_charge(2)
qresetAB = qnegpos.to_dict()
qcdb.compare_integers(2, qresetAB['molecular_charge'], 'Q: reset-negpos: c')
qcdb.compare_integers(3, qresetAB['molecular_multiplicity'], 'Q: reset-negpos: m')
pnegpos.set_multiplicity(3)
pnegpos.set_molecular_charge(2)
presetAB = pnegpos.to_dict()
qcdb.compare_integers(2, presetAB['molecular_charge'], 'P: reset-negpos: c')
qcdb.compare_integers(3, presetAB['molecular_multiplicity'], 'P: reset-negpos: m')
|
amjames/psi4
|
samples/python/mints13/test.py
|
Python
|
lgpl-3.0
| 4,091
|
[
"Psi4"
] |
cc3d9c5ce6177f17a35ec44a49471e2d65d7355cb4a04f24b6182f81fb0e8d12
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The UI widgets of the plugin view dialog
"""
from PyQt4 import QtCore, QtGui
from openlp.core.lib import UiStrings, translate
from openlp.core.lib.ui import create_button_box
class Ui_PluginViewDialog(object):
"""
The UI of the plugin view dialog
"""
def setupUi(self, pluginViewDialog):
"""
Set up the UI
"""
pluginViewDialog.setObjectName('pluginViewDialog')
pluginViewDialog.setWindowModality(QtCore.Qt.ApplicationModal)
self.pluginLayout = QtGui.QVBoxLayout(pluginViewDialog)
self.pluginLayout.setObjectName('pluginLayout')
self.listLayout = QtGui.QHBoxLayout()
self.listLayout.setObjectName('listLayout')
self.pluginListWidget = QtGui.QListWidget(pluginViewDialog)
self.pluginListWidget.setObjectName('pluginListWidget')
self.listLayout.addWidget(self.pluginListWidget)
self.pluginInfoGroupBox = QtGui.QGroupBox(pluginViewDialog)
self.pluginInfoGroupBox.setObjectName('pluginInfoGroupBox')
self.pluginInfoLayout = QtGui.QFormLayout(self.pluginInfoGroupBox)
self.pluginInfoLayout.setObjectName('pluginInfoLayout')
self.statusLabel = QtGui.QLabel(self.pluginInfoGroupBox)
self.statusLabel.setObjectName('statusLabel')
self.statusComboBox = QtGui.QComboBox(self.pluginInfoGroupBox)
self.statusComboBox.addItems(('', ''))
self.statusComboBox.setObjectName('statusComboBox')
self.pluginInfoLayout.addRow(self.statusLabel, self.statusComboBox)
self.versionLabel = QtGui.QLabel(self.pluginInfoGroupBox)
self.versionLabel.setObjectName('versionLabel')
self.versionNumberLabel = QtGui.QLabel(self.pluginInfoGroupBox)
self.versionNumberLabel.setObjectName('versionNumberLabel')
self.pluginInfoLayout.addRow(self.versionLabel, self.versionNumberLabel)
self.aboutLabel = QtGui.QLabel(self.pluginInfoGroupBox)
self.aboutLabel.setObjectName('aboutLabel')
self.aboutTextBrowser = QtGui.QTextBrowser(self.pluginInfoGroupBox)
self.aboutTextBrowser.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.aboutTextBrowser.setObjectName('aboutTextBrowser')
self.pluginInfoLayout.addRow(self.aboutLabel, self.aboutTextBrowser)
self.listLayout.addWidget(self.pluginInfoGroupBox)
self.pluginLayout.addLayout(self.listLayout)
self.button_box = create_button_box(pluginViewDialog, 'button_box', ['ok'])
self.pluginLayout.addWidget(self.button_box)
self.retranslateUi(pluginViewDialog)
def retranslateUi(self, pluginViewDialog):
"""
Translate the UI on the fly
"""
pluginViewDialog.setWindowTitle(translate('OpenLP.PluginForm', 'Plugin List'))
self.pluginInfoGroupBox.setTitle(translate('OpenLP.PluginForm', 'Plugin Details'))
self.versionLabel.setText('%s:' % UiStrings().Version)
self.aboutLabel.setText('%s:' % UiStrings().About)
self.statusLabel.setText(translate('OpenLP.PluginForm', 'Status:'))
self.statusComboBox.setItemText(0, translate('OpenLP.PluginForm', 'Active'))
self.statusComboBox.setItemText(1, translate('OpenLP.PluginForm', 'Inactive'))
|
marmyshev/item_title
|
openlp/core/ui/plugindialog.py
|
Python
|
gpl-2.0
| 5,363
|
[
"Brian"
] |
aae43906326840ec2192847760084482bf68854bf98c864b2c95fd2e8ed43582
|
'''
Code for generating the sham catalogs.
Author : ChangHoon Hahn
'''
import os
import h5py
import numpy as np
from numpy import log10, Inf
from scipy import integrate, interpolate, ndimage
def DownloadedCatalog(catalog='bolshoi'):
''' Take .list format downloaded catalog and load it into an .hdf5 file format
so that the halo catalog can be easily read
'''
current_dir = os.path.dirname(os.path.realpath(__file__))
if current_dir != '/home/mj/assembly/code':
raise ValueError("This function is only meant to be run on Chang's user directory on Sirocco!")
if catalog == 'bolshoi': # Bolshoi Box
list_file = ''.join([
'/export/bbq2/mj/',
'hlist_1.00231.list'
])
a_scale = 1.00231
redshift = 0.0 # close enough
elif catalog == 'smdpl': # Small MultiDark Planck
list_file = ''.join([
'/export/bbq2/mj/',
'smdpl_hlist_1.00000.list'
])
a_scale = 1.00
redshift = 0.0 # close enough
# read in first few lines of the file to get the columns and column indicies
f = open(list_file, 'r')
first_line = f.readline()
col_list = first_line[1:].replace(')\n', '').replace(') ',',').split(',') # don't touch! string parsing magic
columns, column_indices = [], []
for ccc in col_list:
columns.append('('.join(ccc.rsplit('(')[:-1]))
column_indices.append(ccc.rsplit('(')[-1])
list_data = np.loadtxt(list_file) # this takes forever
# save all columns to hdf5
if catalog == 'bolshoi':
hdf5_file = ''.join([
'/export/bbq2/mj/',
catalog, '_a1.00231.hdf5'])
elif catalog == 'smdpl':
hdf5_file = ''.join([
'/export/bbq2/mj/',
catalog, '_a1.00000.hdf5'])
f = h5py.File(hdf5_file, 'w')
grp = f.create_group('data')
grp.attrs['a_scale'] = a_scale
grp.attrs['redshift'] = redshift
for i_col, col in enumerate(columns):
if ('id' in col) or ('ID' in col):
data_column = list_data[:,i_col].astype('int')
elif col == '':
continue
else:
data_column = list_data[:,i_col]
grp.create_dataset(col, data=data_column)
f.close()
return None
def add_to_catalog(catalog='bolshoi'):
from astropy.table import Table
from halotools.utils import group_member_generator
a_scale = 1.00
redshift = 0.0 # close enough
if catalog == 'bolshoi':
hdf5_file = ''.join([
'/export/bbq2/mj/',
catalog, '_a1.00231.hdf5'])
hdf5_file_new = ''.join([
'/export/bbq2/mj/',
catalog, '_new_a1.00231.hdf5'])
elif catalog == 'smdpl':
hdf5_file = ''.join([
'/export/bbq2/mj/',
catalog, '_a1.00000.hdf5'])
hdf5_file_new = ''.join([
'/export/bbq2/mj/',
catalog, '_new_a1.00000.hdf5'])
f = h5py.File(hdf5_file, 'r')
grp = f["data"]
#cols = grp.keys()
cols = ['id', 'pid', 'upid', 'x', 'y', 'z', 'vx', 'vy', 'vz',
'Vpeak', 'Mpeak', 'vrms', 'mvir', 'rvir', 'Vmax@Mpeak', 'Mpeak_Scale', 'rs']
names = [col for col in grp.keys()]
#table_content = [grp[col] for col in grp.keys()]
idd = grp['id'][:]
pid = grp['pid'][:]
upid = grp['upid'][:]
x = grp['x'][:]
y = grp['y'][:]
z = grp['z'][:]
vx = grp['vx'][:]
vy = grp['vy'][:]
vz = grp['vz'][:]
Vpeak = grp['Vpeak'][:]
Mpeak = grp['Mpeak'][:]
vrms = grp['vrms'][:]
mvir = grp['mvir'][:]
rvir = grp['rvir'][:]
Vmax_at_Mpeak = grp['Vmax@Mpeak'][:]
Mpeak_Scale = grp['Mpeak_Scale'][:]
rs = grp['rs'][:]
table = Table([idd, pid, upid, x, y, z, vx, vy, vz, Vpeak, Mpeak, vrms, mvir, rvir,
Vmax_at_Mpeak, Mpeak_Scale, rs], names = cols, meta= {'name': 'first table'})
host_id = idd[:].copy()
sats = np.where(upid[:]!= -1)[0]
host_id[sats] = upid[:][sats]
table['host_id'] = host_id
table.sort(['host_id','upid'])
grouping_key = 'host_id'
requested_columns = ['mvir']
group_gen = group_member_generator(table, grouping_key, requested_columns)
host_mass = np.zeros(len(table))
for first, last, member_props in group_gen:
mvir_members = member_props[0]
mvir_host = mvir_members[0]
host_mass[first:last] = mvir_host
table['host_mass'] = host_mass
f_new = h5py.File(hdf5_file_new, 'w')
grp_new = f_new.create_group('data')
columns = table.colnames
grp_new.attrs['a_scale'] = a_scale
grp_new.attrs['redshift'] = redshift
for i_col, col in enumerate(columns):
data_column = table[col]
grp_new.create_dataset(col, data=data_column)
f_new.close()
return None
class Halos(object):
'''
Class to deal with halo catalogs downloaded from Peter Behroozi's
Rockstar website.
> http://hipacc.ucsc.edu/Bolshoi/MergerTrees.html
'''
def __init__(self, catalog='bolshoi'):
'''
'''
self.column_list = ['id', 'pid', 'upid', 'x', 'y', 'z', 'vx', 'vy', 'vz',
'Vpeak', 'Mpeak', 'vrms', 'mvir', 'rvir', 'Vmax@Mpeak', 'Mpeak_Scale', 'rs', 'host_id', 'host_mass']
if catalog not in ['bolshoi', 'smdpl']:
raise NotImplementedError("Catalog not included yet")
self.catalog = catalog
def File(self):
'''
'''
file_dir = '/export/bbq2/mj/'
if self.catalog == 'bolshoi':
self.file_name = ''.join([file_dir, 'bolshoi_new_a1.00231.hdf5'])
elif self.catalog == 'smdpl':
self.file_name = ''.join([file_dir, 'smdpl_new_a1.00000.hdf5'])
return self.file_name
def Read(self):
''' Read in hdf5 file
'''
file = self.File()
f = h5py.File(file, 'r')
grp = f['data']
for attr in grp.attrs.keys():
setattr(self, attr, grp.attrs[attr])
# import *select* columns
cols = self.Columns()
for col in cols:
if col == 'Mpeak':
setattr(self, col, np.log10(grp[col][:]))
elif col == 'Vmax@Mpeak':
setattr(self, "VmaxMpeak", grp[col][:])
else:
setattr(self, col, grp[col][:])
return None
def Columns(self):
return self.column_list
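# Hedged usage sketch (added for illustration; the hdf5 paths above are
# hard-coded to a specific machine, so this only runs where those catalogs
# exist):
#
# halos = Halos(catalog='bolshoi')
# halos.Read()
# print halos.redshift, halos.Mpeak.min(), halos.Mpeak.max() # Mpeak is stored as log10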
class shamHalos(object):
'''
Class to deal with halo catalogs with SHAMed M_r or stellar mass
'''
def __init__(self, catalog='bolshoi', sham_dict=None):
self.catalog = catalog
self.sham_dict = sham_dict # dictionary specifying the SHAM choices
self.column_list = None
def ReadHaloCatalog(self):
''' Read in halo catalog using the Halos object class and import
the columns into object.
'''
halocat = Halos(catalog=self.catalog)
halocat.Read()
for col in halocat.Columns():
if col == "Vmax@Mpeak":
setattr(self, "VmaxMpeak", getattr(halocat, "VmaxMpeak"))
else:
setattr(self, col, getattr(halocat, col))
self.a_scale = halocat.a_scale
self.redshift = halocat.redshift
self.column_list = halocat.column_list
return None
def SHAMassign(self, m_kind='mstar', scat=0, source='', sham_prop='Mpeak'):
'''
Assign Mag_r or M_star via abundance matching.
'''
self.ReadHaloCatalog()
#self.sham_dict = {'m_kind': m_kind, 'scat': scat, 'source': source, 'sham_prop': sham_prop}
if m_kind not in self.column_list:
self.column_list.append(m_kind)
vol = (self._CatalogBox() / self._little_h()) ** 3
print 'Box Length', self._CatalogBox()
print 'Box Hubble', self._little_h()
if m_kind == 'mstar':
if not source:
source = 'li-drory-march'
redshift = self.redshift
if redshift < 0.1:
redshift = 0.1
MF = SMFClass(source=source, redshift=redshift, scat=scat, hubble=self._little_h())
elif m_kind == 'mag_r':
if source == 'cool_ages':
redshift = self.redshift
if redshift < 0.1:
redshift = 0.1
MF = LFClass(source=source, scat=scat, hubble=self._little_h(), redshift=redshift)
else:
if not source:
source = 'blanton'
MF = LFClass(source, scat, self._little_h())
else:
raise ValueError('not recognize m_kind = %s' % m_kind)
if sham_prop == 'tailored':
# special request SHAM :
# v_vir * (v_max / v_vir)^0.57
sham_attr = np.zeros(len(self.vrms))
delta = 360
omegam = 0.307115
omegal = 0.692885
H_scale = omegam / (self.Mpeak_Scale)**3. + omegal
v_vir = (0.5 * delta * H_scale ** 2. * 4.302**2. * 10.**14.)**(1./6) * (10. ** self.Mpeak)**(1./3)
#v_vir = ((100 ** 0.5) * (4.302 * 10 ** -7.) * (10. ** self.Mpeak))**1./3
vvir_notzero = np.where(v_vir != 0.)
sham_attr[vvir_notzero] = v_vir[vvir_notzero] * (
self.VmaxMpeak[vvir_notzero] / v_vir[vvir_notzero])**0.57
else:
sham_attr = getattr(self, sham_prop)
print 'SHAM attribute = ', sham_attr.min(), sham_attr.max()
m_kind_attr = np.zeros(len(sham_attr), np.float32)
if m_kind == 'mstar':
MF.initialize_redshift(redshift)
elif m_kind == 'mag_r':
if source == 'cool_ages':
MF.initialize_redshift(redshift)
# maximum number of objects in volume to assign given SMF/LF threshold
num_max = int(round(MF.numden(MF.mmin) * vol))
sis = elements(sham_attr, [0.001, Inf])
siis_sort = np.argsort(sham_attr[sis]).astype(sis.dtype)[::-1][:num_max]
num_sums = arange_length(num_max) + 1
if scat:
if m_kind == 'mstar':
scats = np.random.normal(np.zeros(num_max), MF.scat).astype(np.float32)
elif m_kind == 'mag_r':
scats = np.random.normal(np.zeros(num_max), 2.5 * MF.scat).astype(np.float32)
#print MF.m_scat(num_sums / vol) + scats
m_kind_attr[sis[siis_sort]] = MF.m_scat(num_sums / vol) + scats
else:
m_kind_attr[sis[siis_sort]] = MF.m(num_sums / vol)
setattr(self, m_kind, m_kind_attr)
return None
def File(self):
''' File name of SHAMed catalog
'''
if self.sham_dict is None:
raise ValueError
halocat = Halos(catalog=self.catalog)
halo_file = halocat.File()
halo_file = halo_file.rsplit('.hdf5')[0]
sham_file = ''.join([
halo_file,
'.', self.sham_dict['m_kind'],
'.source_', self.sham_dict['source'],
'.scatter', str(round(self.sham_dict['scat'], 2)),
'.', self.sham_dict['sham_prop'],
'.hdf5'])
return sham_file
def Read(self):
sham_file = self.File()
f = h5py.File(sham_file, 'r')
grp = f['data']
for col in grp.keys():
setattr(self, col, grp[col][:])
for attr in grp.attrs.keys():
setattr(self, attr, grp.attrs[attr])
return None
def Write(self):
''' Write SHAMed halo catalog to file
'''
if self.sham_dict is None:
raise ValueError
if self.column_list is None or self.sham_dict['m_kind'] not in self.column_list:
self.SHAMassign(
m_kind=self.sham_dict['m_kind'],
scat=self.sham_dict['scat'],
source=self.sham_dict['source'],
sham_prop=self.sham_dict['sham_prop']
)
sham_file = self.File()
f = h5py.File(sham_file, 'w')
grp = f.create_group('data')
columns = self.Columns()
print columns
for i_col, col in enumerate(columns):
if col == 'M200b':
col_data = getattr(self, col)
id = getattr(self, 'id')
upid = getattr(self, 'upid')
nonneg = np.where(upid != -1)[0]
sub_id, sub_upid = intersection_index(id, upid[nonneg])
col_data[nonneg[sub_upid]] = col_data[sub_id]
elif col == 'Mpeak':
col_data = np.log10(getattr(self, col))
elif col == 'Vmax@Mpeak':
col_data = getattr(self, 'VmaxMpeak')
else:
col_data = getattr(self, col)
grp.create_dataset(col, data=col_data)
grp.attrs['a_scale'] = self.a_scale
grp.attrs['redshift'] = self.redshift
grp.attrs['Lbox'] = self._CatalogBox()
grp.attrs['little_h'] = self._little_h()
f.close()
def Columns(self):
return self.column_list
def _CatalogBox(self):
if self.catalog == 'multidark':
L_box = 1000. # Mpc/h
elif self.catalog == 'smdpl':
L_box = 400. # Mpc/h
elif self.catalog == 'bolshoi':
L_box = 250. # Mpc/h
return L_box
def _little_h(self):
if self.catalog == 'bolshoi':
h_little = 0.68
elif self.catalog == 'smdpl':
h_little = 0.6777
else:
h_little = 0.7
return h_little
class SMFClass:
'''
Relate number density [dnumden / dlog(M_star/M_sun)] <-> stellar mass [log10(M_star/M_sun)]
using fits to observed stellar mass functions.
All SMFs assume input Hubble constant.
'''
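# A minimal usage sketch (comments only; the source name and values are
# illustrative, and scipy's `interpolate`/`integrate` plus `log10` are assumed
# to be imported at the top of this module):
#   >>> smf = SMFClass(source='li-march', redshift=0.5, scat=0.15, hubble=0.7)
#   >>> smf.numden(10.5)    # number density [Mpc^-3] above log10(M*/Msun) = 10.5
#   >>> smf.m(1e-3)         # stellar mass at threshold number density 1e-3 Mpc^-3
#   >>> smf.m_scat(1e-3)    # same, using the de-scattered (deconvolved) SMF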
def __init__(self, source='li-march', redshift=0.1, scat=0, hubble=0.7):
'''
Import SMF source, redshift, log scatter in M_star at fixed Msub.
'''
self.source = source
self.scat = scat
self.hubble = hubble
if source == 'li':
'''
Li & White 2009. z = 0.1 from SDSS. Chabrier IMF. Complete to 1e8 M_sun/h^2.
'''
self.redshifts = np.array([0.1])
self.mchars = np.array([10.525]) - 2 * log10(hubble) # {M_sun}
self.amplitudes = np.array([0.0083]) * hubble ** 3 # {Mpc ^ -3 / log(M/M_sun)}
self.slopes = np.array([-1.155])
self.initialize_redshift(redshift)
elif source == 'baldry':
'''
Baldry et al 2008. z = 0.1 from SDSS. diet Salpeter IMF = 0.7 Salpeter.
Complete to 1e8 M_sun.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1])
# convert to Chabrier
self.mchars = (np.array([10.525]) + 2 * log10(h_them / hubble) + log10(1 / 1.6 / 0.7))
self.amplitudes = np.array([0.00426]) * (hubble / h_them) ** 3
self.amplitudes2 = np.array([0.00058]) * (hubble / h_them) ** 3
self.slopes = np.array([-0.46])
self.slopes2 = np.array([-1.58])
self.initialize_redshift(redshift)
elif source == 'cole-march':
'''
Marchesini et al 2009. 1.3 < z < 4.0. Kroupa IMF.
z = 0.1 from Cole et al 2001 (2dF), converting their Salpeter to Kroupa.
*** In order to use out to z ~ 4, evolution is held flat from z = 3.5 to 4.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.65, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
# converted to {Mpc ^ -3 dex ^ -1}
self.amplitudes = np.array([90.00, 29.65, 11.52, 1.55, 1.55]) * 1e-4 * hubble ** 3
self.slopes = np.array([-1.18, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march':
'''
Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march-extreme':
'''
More extreme version of Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.00001, 0.00001, 0.00001, 0.000001]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'constant-li':
'''
Li & White at all redshifts
'''
self.redshifts = np.arange(0.1, 4.03, 0.1)
self.mchars = np.repeat(10.525, len(self.redshifts)) - 2 * log10(hubble)
self.amplitudes = (np.repeat(0.0083, len(self.redshifts))* hubble ** 3)
self.slopes = np.repeat(-1.155, len(self.redshifts))
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'fontana':
'''
Fontana et al 2006. 0.4 < z < 4 from GOODS-MUSIC. Salpeter IMF.
z = 0.1 from Cole et al 2001.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1, 4.0]) # store redshift range of validity
self.amplitude0 = 0.0035 * (hubble / h_them) ** 3 # to {Mpc ^ -3 / log10(M/M_sun)}
self.amplitude1 = -2.2
self.slope0 = -1.18
self.slope1 = -0.082
self.mchar0 = 11.16 # log10(M/M_sun)
self.mchar1 = 0.17 # log10(M/M_sun)
self.mchar2 = -0.07 # log10(M/M_sun)
# convert to my hubble & Chabrier IMF
self.mchar0 += 2 * log10(h_them / hubble) - log10(1.6)
self.initialize_redshift(redshift)
elif source == 'li-drory-march':
'''
Drory et al 2009. 0.3 < z < 1.0 from COSMOS.
Chabrier IMF limited to 0.1 - 100 M_sun.
Complete to (8.0, 8.6, 8.9, 9.1) M_sun/h^2 at z = (0.3, 0.5, 0.7, 0.9).
Anchor to Li & White at z = 0.1, Marchesini et al at higher redshift.
See Ilbert et al 2010 for alternate COSMOS version.
'''
h_them = 0.72 # their assumed hubble constant
self.redshifts = np.array([0.3, 0.5, 0.7, 0.9])
self.mchars = np.array([10.90, 10.91, 10.95, 10.92]) + 2 * log10(h_them / hubble)
# convert to [Mpc ^ -3 dex^-1]
self.amplitudes = (np.array([0.00289, 0.00174, 0.00216, 0.00294]) *
(hubble / h_them) ** 3)
self.slopes = np.array([-1.06, -1.05, -0.93, -0.91])
self.mchars2 = np.array([9.63, 9.70, 9.75, 9.85]) + 2 * log10(h_them / hubble)
self.amplitudes2 = (np.array([0.00180, 0.00143, 0.00289, 0.00212]) *
(hubble / h_them) ** 3)
self.slopes2 = np.array([-1.73, -1.76, -1.65, -1.65])
# add li & white
self.redshifts = np.append(0.1, self.redshifts)
self.mchars = np.append(10.525 - 2 * log10(hubble), self.mchars)
self.amplitudes = np.append(0.0083 * hubble ** 3, self.amplitudes)
self.slopes = np.append(-1.155, self.slopes)
self.mchars2 = np.append(self.mchars2[0], self.mchars2)
self.amplitudes2 = np.append(0, self.amplitudes2)
self.slopes2 = np.append(self.slopes2[0], self.slopes2)
# add marchesini et al
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.append(self.redshifts, [1.6, 2.5, 3.56, 4.03])
self.mchars = np.append(self.mchars,
np.array([10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble))
self.amplitudes = np.append(self.amplitudes,
np.array([0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.append(self.slopes, [-1.00, -1.01, -1.39, -1.39])
self.mchars2 = np.append(self.mchars2, np.zeros(4) + self.mchars2[0])
self.amplitudes2 = np.append(self.amplitudes2, np.zeros(4))
self.slopes2 = np.append(self.slopes2, np.zeros(4) + self.slopes2[0])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-drory-march_sameslope':
'''
Apply low-mass slope from Drory et al 2009 to Li & White, Marchesini et al.
'''
self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.61, 10.62, 10.66, 10.63, 10.60, 10.65, 11.07,
11.07]) - 2 * log10(hubble)
self.amplitudes = np.array([0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297,
0.00115, 0.000155, 0.000155]) * hubble ** 3
self.slopes = np.array([-1.155, -1.06, -1.05, -0.93, -0.91, -1.00, -1.01, -1.39, -1.39])
self.mchars2 = (np.array([9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83]) -
2 * log10(hubble))
self.amplitudes2 = np.array([0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962,
0.000375, 0.0000503, 0.0000503]) * hubble ** 3
self.slopes2 = np.array([-1.70, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'perez':
'''
Perez-Gonzalez et al 2008. 0.1 < z < 4.0 from Spitzer, Hubble, Chandra.
Salpeter IMF.
Complete to (8, 9.5, 10, 11) M_star at z = (0, 1, 2, 3).
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.15, 1.45, 1.8, 2.25, 2.75, 3.25,
3.75])
self.mchars = np.array([11.16, 11.20, 11.26, 11.25, 11.27, 11.31, 11.34, 11.40, 11.46,
11.34, 11.33, 11.36]) + 2 * log10(h_them / hubble)
# convert to Chabrier IMF
self.mchars -= log10(1.6)
# convert to [Mpc ^ -3 dex ^ -1]
self.amplitudes = (10 ** np.array([-2.47, -2.65, -2.76, -2.82, -2.91, -3.06, -3.27,
- 3.49, -3.69, -3.64, -3.74, -3.94]) *
(hubble / h_them) ** 3)
self.slopes = np.array([-1.18, -1.19, -1.22, -1.26, -1.23, -1.26, -1.29, -1.27, -1.26,
- 1.20, -1.14, -1.23])
self.make_splines()
self.initialize_redshift(redshift)
else:
raise ValueError('unrecognized source = %s' % source)
def make_splines(self):
'''
Make spline fits to SMF fit parameters v redshift.
Use 1st order spline (k) to avoid ringing.
'''
self.mchar_z_spl = interpolate.splrep(self.redshifts, self.mchars, k=1)
self.slope_z_spl = interpolate.splrep(self.redshifts, self.slopes, k=1)
self.amplitude_z_spl = interpolate.splrep(self.redshifts, self.amplitudes, k=1)
if self.source in ('li-drory-march', 'li-drory-march_sameslope'):
self.mchar2_z_spl = interpolate.splrep(self.redshifts, self.mchars2, k=1)
self.slope2_z_spl = interpolate.splrep(self.redshifts, self.slopes2, k=1)
self.amplitude2_z_spl = interpolate.splrep(self.redshifts, self.amplitudes2, k=1)
def initialize_redshift(self, redshift=0.1):
'''
Make spline to get mass from number density.
Import redshift.
Find SMF fit parameters at redshift, correcting amplitude by * log(10) & slope
by + 1 to make dndm call faster.
'''
if redshift < self.redshifts.min() - 1e-5 or redshift > self.redshifts.max() + 1e-5:
raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
self.redshift = redshift
if self.source in ('li'):
self.m_char = self.mchars[0]
self.amplitude = self.amplitudes[0] * np.log(10)
self.slope = self.slopes[0] + 1
elif self.source in ('baldry'):
self.m_char = self.mchars[0]
self.mchar2 = self.mchars[0]
self.amplitude = self.amplitudes[0] * np.log(10)
self.amplitude2 = self.amplitudes2[0] * np.log(10)
self.slope = self.slopes[0] + 1
self.slope2 = self.slopes2[0] + 1
elif self.source in ('cole-march', 'li-march', 'perez', 'constant-li', 'li-march-extreme'):
self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
elif self.source == 'fontana':
self.m_char = self.mchar0 + self.mchar1 * redshift + self.mchar2 * redshift ** 2
self.amplitude = (self.amplitude0 * (1 + redshift) ** self.amplitude1) * np.log(10)
self.slope = (self.slope0 + self.slope1 * redshift) + 1
elif self.source in ('li-drory-march', 'li-drory-march_sameslope'):
self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
self.mchar2 = interpolate.splev(redshift, self.mchar2_z_spl)
self.amplitude2 = interpolate.splev(redshift, self.amplitude2_z_spl) * np.log(10)
self.slope2 = interpolate.splev(redshift, self.slope2_z_spl) + 1
self.make_numden_m_spline(self.redshift, self.scat)
def dndm(self, m_star):
'''
Compute d(num-den) / d(log m) = ln(10) * amplitude * (10^(m_star - m_char)) ** (1 + slope) *
exp(-10^(m_star - m_char)).
Import stellar mass.
'''
m_rats = 10 ** (m_star - self.m_char)
if 'drory' in self.source or self.source == 'baldry':
dm2s = 10 ** (m_star - self.mchar2)
return (self.amplitude * m_rats ** self.slope * np.exp(-m_rats) +
self.amplitude2 * dm2s ** self.slope2 * np.exp(-dm2s))
else:
return self.amplitude * m_rats ** self.slope * np.exp(-m_rats)
def numden(self, m_min, m_max=14):
'''
Compute number density within range.
Import stellar mass range.
'''
return integrate.quad(self.dndm, m_min, m_max)[0]
def make_numden_m_spline(self, redshift=0.1, scat=0):
'''
Make splines to relate d(num-den) / d[log]m & num-den(> m) to m.
Import redshift (if want to change), mass scatter [dex].
'''
iter_num = 30
if redshift != self.redshift:
self.initialize_redshift(redshift)
if scat != self.scat:
self.scat = scat
dm = 0.01
dm_scat_lo = 3 * scat # extend fit for deconvolute b.c.'s
dm_scat_hi = 0.5 * scat # extend fit for deconvolute b.c.'s
self.mmin = 7.3
self.mmax = 12.3
m_stars = np.arange(self.mmin - dm_scat_lo, self.mmax + dm_scat_hi, dm, np.float32)
numdens = np.zeros(m_stars.size)
dndms = np.zeros(m_stars.size)
for mi in xrange(m_stars.size):
# make sure numdens are monotonically decreasing even if = -infinity
numdens[mi] = self.numden(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
dndms[mi] = self.dndm(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
# make no scatter splines
self.log_numden_m_spl = interpolate.splrep(m_stars, log10(numdens))
self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], m_stars[::-1])
# at high z the smf is not monotonically decreasing, so the spline below does not work
# self.m_log_dndm_spl = interpolate.splrep(log10(dndms)[::-1], m_stars[::-1])
# make scatter splines
if scat:
# deconvolve observed smf assuming scatter to find unscattered one
dndms_scat = deconvolute(dndms, scat, dm, iter_num)
# chop off lower boundaries, unreliable
m_stars = m_stars[int(dm_scat_lo / dm):]
dndms_scat = dndms_scat[int(dm_scat_lo / dm):]
# find spline to integrate over
self.dndm_m_scat_spl = interpolate.splrep(m_stars, dndms_scat)
numdens_scat = np.zeros(m_stars.size)
for mi in xrange(m_stars.size):
numdens_scat[mi] = interpolate.splint(m_stars[mi], m_stars.max(),
self.dndm_m_scat_spl)
numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
self.log_numden_m_scat_spl = interpolate.splrep(m_stars, log10(numdens_scat))
self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1],
m_stars[::-1])
def m(self, num_den):
'''
Get mass at threshold.
Import threshold number density.
'''
return interpolate.splev(log10(num_den), self.m_log_numden_spl).astype(np.float32)
def m_scat(self, num_den):
'''
Get mass at threshold, using de-scattered source.
Import threshold number density.
'''
return interpolate.splev(log10(num_den), self.m_log_numden_scat_spl).astype(np.float32)
def m_dndm(self, dn_dm):
'''
Get mass at d(num-den)/d[log]m.
Import d(num-den) / d[log]m.
'''
return interpolate.splev(log10(dn_dm), self.m_log_dndm_spl)
def dndm_scat(self, m):
'''
Get d(num-den) / d[log]m at m, using de-scattered source.
Import mass.
'''
return interpolate.splev(m, self.dndm_m_scat_spl)
def numden_scat(self, m_min, m_max=14):
'''
Get num-den(>[log]m) at m, using de-scattered source.
Import mass.
'''
return integrate.quad(self.dndm_scat, m_min, m_max)[0]
#return 10 ** (interpolate.splev(m, self.log_numden_m_scat_spl))
class LFClass(SMFClass):
'''
Relate number density [Mpc ^ -3] <-> magnitude/luminosity using spline fit to luminosity
functions.
Import spline querying functions from SMFClass.
'''
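# Usage sketch (comments only; values are illustrative):
#   >>> lf = LFClass(source='blanton', scat=0.15, hubble=0.7)
#   >>> lf.numden(20.0)    # number density [Mpc^-3] brighter than Mag_r = -20
#   >>> lf.m_scat(1e-3)    # (positive) magnitude at number density 1e-3 Mpc^-3,
#   >>>                    # using the de-scattered LF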
def __init__(self, source='blanton', scat=0, hubble=0.7, redshift=0.1):
'''
Import source, log-normal scatter.
'''
self.source = source
self.scat = scat
self.hubble = hubble
if source == 'norberg':
# Norberg et al 2002: 2dF r-band at z ~ 0.1.
self.m_char = -19.66
self.amplitude = 1.61e-2 * hubble ** 3 # Mpc ^ -3
self.slope = -1.21
elif source == 'blanton':
# Blanton et al 03: SDSS r-band z ~ 0.1.
self.m_char = -20.44
self.amplitude = 1.49e-2 * hubble ** 3 # Mpc ^ -3
self.slope = -1.05
elif source == 'sheldon':
# Sheldon et al 07: SDSS i-band z = 0.25. Valid for Mag < -19.08 (0.19L*).
self.m_char = -20.9 # Hansen et al 09 catalog has -20.8
self.amplitude = 1.02e-2 * hubble ** 3 # Mpc ^ -3
self.slope = -1.21
elif source == 'cool_ages':
# Cool et al 2012: AGES.
self.redshifts = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.65])
self.mchars = np.array([-20.58, -20.81, -20.81, -20.99, -21.29, -21.38])
self.amplitudes = (np.array([1.59e-2, 1.52e-2, 1.24e-2, 1.44e-2, 1.08e-2, 1.05e-2]) * hubble ** 3) # Mpc ^ -3
self.slopes = np.repeat(-1.05, len(self.redshifts))
self.make_splines()
self.initialize_redshift(redshift)
else:
raise ValueError('unrecognized source = %s in LFClass' % source)
if source != 'cool_ages':
self.make_numden_m_spline(scat, redshift=None)
def dndm(self, mag):
'''
Get d(num-den) / d(mag).
Import (positive) magnitude.
'''
mag *= -1.
return (np.log(10) / 2.5 * self.amplitude *
10 ** ((self.slope + 1) / 2.5 * (self.m_char - mag)) *
np.exp(-10 ** ((self.m_char - mag) / 2.5)))
def numden(self, m_min, m_max=25):
'''
Get number density within range.
Import (positive) magnitude range.
'''
return integrate.quad(self.dndm, m_min, m_max)[0]
def initialize_redshift(self, redshift=0.1):
'''
Make spline to get mass from number density.
Import redshift.
Find SMF fit parameters at redshift, correcting amplitude by * log(10) & slope
by + 1 to make dndm call faster.
'''
if redshift < self.redshifts.min() - 1e-5:# or redshift > self.redshifts.max() + 1e-5:
raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
self.redshift = redshift
self.m_char = interpolate.splev(redshift, self.mchar_z_spl, ext=0)
self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl, ext=0)
self.slope = interpolate.splev(redshift, self.slope_z_spl, ext=0)
self.make_numden_m_spline(scat=self.scat, redshift=self.redshift)
def make_numden_m_spline(self, scat=0, redshift=0.1):
'''
Make splines to relate d(num-den)/d(mag) & num-den(> mag) to mag.
Import scatter [dex].
'''
try:
if redshift != self.redshift:
self.initialize_redshift(redshift)
except AttributeError:
pass
if scat != self.scat:
self.scat = scat # convert scatter in log(lum) to scatter in magnitude
mag_scat = 2.5 * self.scat
deconvol_iter_num = 30
dmag = 0.01
dmag_scat_lo = 2 * mag_scat # extend fit for b.c.'s of deconvolute
dmag_scat_hi = 1 * mag_scat
self.mmin = 17.0
#self.mmax = 23.3
self.mmax = 24.0
mags = np.arange(self.mmin - dmag_scat_lo, self.mmax + dmag_scat_hi, dmag, np.float32)
numdens = np.zeros(mags.size)
dndms = np.zeros(mags.size)
for mi in xrange(len(mags)):
numdens[mi] = np.abs(self.numden(mags[mi]))
dndms[mi] = self.dndm(mags[mi])
#print 'numden ', numdens[:10]
#print mags[:10]
# make no scatter splines
self.log_numden_m_spl = interpolate.splrep(mags, log10(numdens))
self.dndm_m_spl = interpolate.splrep(mags, dndms)
self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], mags[::-1])
# make scatter splines
if self.scat:
# deconvolve observed lf assuming scatter to find unscattered one
dndms_scat = deconvolute(dndms, mag_scat, dmag, deconvol_iter_num)
# chop off boundaries, unreliable
#print mags.min(), mags.max()
#mags = mags[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
#dndms_scat = dndms_scat[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
#print mags.min(), mags.max()
# find spline to integrate over
self.dndm_m_scat_spl = interpolate.splrep(mags, dndms_scat)
numdens_scat = np.zeros(mags.size)
for mi in xrange(mags.size):
numdens_scat[mi] = np.abs(interpolate.splint(mags[mi], mags.max(), self.dndm_m_scat_spl))
numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
self.log_numden_m_scat_spl = interpolate.splrep(mags, log10(numdens_scat))
self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1], mags[::-1])
# Utility functions
def deconvolute(y_conv, scatter, x_wid, iter_num=10):
'''
Get deconvolved version via Lucy routine.
Import gaussian convoluted function, scatter, bin width, number of iterations.
'''
yit = y_conv
for _ in xrange(iter_num):
ratio = y_conv / ndimage.filters.gaussian_filter1d(yit, scatter / x_wid)
yit = yit * ndimage.filters.gaussian_filter1d(ratio, scatter / x_wid)
# this is part of lucy's routine, but seems less stable
#yit = yit * ratio
return yit
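# Example of what `deconvolute` is used for above (comments only; numbers are
# illustrative): if `phi_obs` is an observed mass function equal to the intrinsic
# one convolved with a Gaussian of width 0.15 dex, sampled on a grid of spacing 0.01,
#   >>> phi_intr = deconvolute(phi_obs, scatter=0.15, x_wid=0.01, iter_num=30)
# returns an estimate of the intrinsic (de-scattered) function, such that
# re-convolving `phi_intr` with the same Gaussian approximately recovers `phi_obs`.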
def elements(vals, lim=[-Inf, Inf], vis=None, vis_2=None, get_indices=False, dtype=np.int32):
'''
Get the indices of the input values that are within the input limit, that also are in input vis
index array (if defined).
Either limit can be an array with the same length as vals.
Import array, range to keep, prior indices of vals array to keep,
other array to sub-sample in same way, whether to return selection indices of input vis array.
'''
if not isinstance(vals, np.ndarray):
vals = np.array(vals)
# check if input array
if vis is None:
vis = np.arange(vals.size, dtype=dtype)
else:
vals = vals[vis]
vis_keep = vis
# check if limit is just one value
if np.isscalar(lim):
keeps = (vals == lim)
else:
# sanity check - can delete this eventually
if isinstance(lim[0], int) and isinstance(lim[1], int):
if lim[0] == lim[1]:
raise ValueError('input limit = %s, has same value' % lim)
if lim[0] != lim[1] and 'int' in vals.dtype.name:
print '! elements will not keep objects at lim[1] = %d' % lim[1]
if not np.isscalar(lim[0]) or lim[0] > -Inf:
keeps = (vals >= lim[0])
else:
keeps = None
if not np.isscalar(lim[1]) or lim[1] < Inf:
if keeps is None:
keeps = (vals < lim[1])
else:
keeps *= (vals < lim[1])
elif keeps is None:
keeps = np.arange(vals.size, dtype=dtype)
if get_indices:
if vis_2 is not None:
return vis_keep[keeps], vis_2[keeps], np.arange(vis.size, dtype=dtype)[keeps]
else:
return vis_keep[keeps], np.arange(vis.size, dtype=dtype)[keeps]
else:
if vis_2 is not None:
return vis_keep[keeps], vis_2[keeps]
else:
return vis_keep[keeps]
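# Example (comments only), mirroring the call in SHAMassign above, which keeps
# halos with 0.001 < sham_attr:
#   >>> vals = np.array([0.0, 5.0, 2.0, -1.0])
#   >>> elements(vals, [0.001, Inf])    # -> array([1, 2]), the indices of 5.0 and 2.0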
def arange_length(array_or_length_or_imin=None, imax=None, dtype=np.int32):
'''
Get arange corresponding to input limits or input array size.
Import array or array length or starting value (if latter, also need ending value).
'''
if imax is None:
if np.isscalar(array_or_length_or_imin):
num = array_or_length_or_imin
else:
num = len(array_or_length_or_imin)
return np.arange(num, dtype=dtype)
else:
return np.arange(array_or_length_or_imin, imax, dtype=dtype)
def intersection_index(arr1, arr2):
"""
Find the indices of the intersecting elements of arr1 and arr2.
Typically takes less than a second.
"""
sort_arr1_indices = np.argsort(arr1)
sort_arr2_indices = np.argsort(arr2)
sorted_arr1 = arr1[sort_arr1_indices]
sorted_arr2 = arr2[sort_arr2_indices]
arr1_in1d = np.in1d(sorted_arr1, sorted_arr2)
arr2_in1d = np.in1d(sorted_arr2, sorted_arr1)
arr1_intersect_indices = sort_arr1_indices[arr1_in1d]
arr2_intersect_indices = sort_arr2_indices[arr2_in1d]
return arr1_intersect_indices, arr2_intersect_indices
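# Example (comments only; assumes unique entries, as for the halo ids used above).
# The two returned index arrays are aligned so that arr1[i1] == arr2[i2]
# element-by-element, in sorted order of the shared values:
#   >>> arr1 = np.array([5, 3, 9, 1]); arr2 = np.array([9, 5, 7])
#   >>> i1, i2 = intersection_index(arr1, arr2)
#   >>> arr1[i1], arr2[i2]    # -> (array([5, 9]), array([5, 9]))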
if __name__=='__main__':
add_to_catalog(catalog='bolshoi')
#DownloadedCatalog(catalog='smdpl')
for style in [(0.17 , 'tailored') , (0.15 , 'Vpeak')]:
sham_dict = {
'm_kind': 'mag_r',
'scat': style[0],
'source': 'blanton',
'sham_prop': style[1]
}
shame = shamHalos(catalog='bolshoi', sham_dict=sham_dict)
shame.ReadHaloCatalog()
shame.Write()
|
mjvakili/gambly
|
code/sham.py
|
Python
|
mit
| 40,863
|
[
"Gaussian"
] |
8185ce4596e15dabb278ccb1b8ca5e1acc49d58848354485e17459bcad994cb3
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
import espressomd.electrostatics
from espressomd import electrostatic_extensions
@utx.skipIfMissingFeatures(["P3M"])
class ELC_vs_MMM2D_neutral(ut.TestCase):
# Handle to espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
acc = 1e-7
elc_gap = 10.0
box_l = 10.0
bl2 = box_l * 0.5
system.time_step = 0.01
system.cell_system.skin = 0.1
def test_elc_vs_mmm2d(self):
elc_param_sets = {
"inert": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc},
"dielectric": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc,
"delta_mid_bot": 0.1,
"delta_mid_top": 0.9},
"const_pot_0": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": 0.0},
"const_pot_1": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": 1.0},
"const_pot_m1": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": -1.0}
}
mmm2d_param_sets = {
"inert": {
"prefactor": 1.0,
"maxPWerror": self.acc},
"dielectric": {
"prefactor": 1.0,
"maxPWerror": self.acc,
"dielectric_contrast_on": 1,
"delta_mid_bot": 0.1,
"delta_mid_top": 0.9},
"const_pot_0": {
"prefactor": 1.0,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": 0.0},
"const_pot_1": {
"prefactor": 1.0,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": 1.0},
"const_pot_m1": {
"prefactor": 1.0,
"maxPWerror": self.acc,
"const_pot": True,
"pot_diff": -1.0}
}
self.system.box_l = 3 * [self.box_l]
buf_node_grid = self.system.cell_system.node_grid
self.system.cell_system.set_layered(
n_layers=10, use_verlet_lists=False)
self.system.periodicity = [1, 1, 0]
q = 1.0
self.system.part.add(id=0, pos=(5.0, 5.0, 5.0), q=-q)
self.system.part.add(id=1, pos=(2.0, 2.0, 5.0), q=q / 3.0)
self.system.part.add(id=2, pos=(2.0, 5.0, 2.0), q=q / 3.0)
self.system.part.add(id=3, pos=(5.0, 2.0, 7.0), q=q / 3.0)
# MMM2D
mmm2d = espressomd.electrostatics.MMM2D(**mmm2d_param_sets["inert"])
self.system.actors.add(mmm2d)
mmm2d_res = {}
mmm2d_res["inert"] = self.scan()
mmm2d.set_params(**mmm2d_param_sets["dielectric"])
mmm2d_res["dielectric"] = self.scan()
mmm2d.set_params(**mmm2d_param_sets["const_pot_0"])
mmm2d_res["const_pot_0"] = self.scan()
mmm2d.set_params(**mmm2d_param_sets["const_pot_1"])
mmm2d_res["const_pot_1"] = self.scan()
mmm2d.set_params(**mmm2d_param_sets["const_pot_m1"])
mmm2d_res["const_pot_m1"] = self.scan()
self.system.actors.remove(mmm2d)
# ELC
self.system.box_l = [self.box_l, self.box_l, self.box_l + self.elc_gap]
self.system.cell_system.set_domain_decomposition(
use_verlet_lists=True)
self.system.cell_system.node_grid = buf_node_grid
self.system.periodicity = [1, 1, 1]
p3m = espressomd.electrostatics.P3M(prefactor=1.0, accuracy=self.acc,
mesh=[24, 24, 32], cao=6)
self.system.actors.add(p3m)
elc = electrostatic_extensions.ELC(**elc_param_sets["inert"])
self.system.actors.add(elc)
elc_res = {}
elc_res["inert"] = self.scan()
elc.set_params(**elc_param_sets["dielectric"])
elc_res["dielectric"] = self.scan()
elc.set_params(**elc_param_sets["const_pot_0"])
elc_res["const_pot_0"] = self.scan()
elc.set_params(**elc_param_sets["const_pot_1"])
elc_res["const_pot_1"] = self.scan()
elc.set_params(**elc_param_sets["const_pot_m1"])
elc_res["const_pot_m1"] = self.scan()
for run in elc_res:
np.testing.assert_allclose(
mmm2d_res[run], elc_res[run], rtol=0, atol=1e-4)
def scan(self):
n = 10
d = 0.5
res = []
for i in range(n + 1):
z = self.box_l - d - 1.0 * i / n * (self.box_l - 2 * d)
self.system.part[0].pos = [self.bl2, self.bl2, z]
self.system.integrator.run(0)
energy = self.system.analysis.energy()
m = [z]
m.extend(self.system.part[0].f)
m.append(energy['coulomb'])
res.append(m)
return res
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/elc_vs_mmm2d_neutral.py
|
Python
|
gpl-3.0
| 5,884
|
[
"ESPResSo"
] |
4f0e659fb4e810c4fdd86b7460f807536f7175631ced5656f0c019f31faa1995
|
# -*- coding: utf-8 -*-
#
# @most/core documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 12 07:51:42 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'@most/core'
copyright = u'2017, MostJS Org'
author = u'Brian Cavalier, Tylor Steinberger, David Chase'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mostcoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mostcore.tex', u'@most/core Documentation',
u'Brian Cavalier, Tylor Steinberger, David Chase', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mostcore', u'@most/core Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mostcore', u'@most/core Documentation',
author, 'mostcore', 'One line description of project.',
'Miscellaneous'),
]
|
axefrog/core
|
docs/conf.py
|
Python
|
mit
| 4,803
|
[
"Brian"
] |
b1e57354d9d11bfd4f961f69b0e6f38dd695659afa01f3bbe0675426a4286684
|
"""
This file is part of the DiffractionMicroscopy project.
Copyright 2016 David W. Hogg (NYU, SCDA).
This piece of code does nothing related to diffraction.
It only shows that you can reconstruct an image from small numbers of
photons taken in exposures at unknown orientations.
# issues
- Should we apply the rotation projections to the sampling pixel
points or to the Gaussian basis functions? Probably the latter.
"""
import pickle
import numpy as np
def hoggsumexp(qns, axis=None):
"""
# purpose:
- Computes `L = log(sum(exp(qns), axis=-1))` but stably.
- Also computes its N-dimensional gradient components dL / dqns.
# input
- `qns`: ndarray of shape (n1, n2, n3, ..., nD, N)
# output
- `L`: ndarray of shape (n1, n2, n3, ..., nD)
- `dL_dqns`: ndarray same shape as `qns`
# issues
- Not exhaustively tested.
"""
if axis is None:
axis = len(qns.shape) - 1
Q = np.max(qns)
expqns = np.exp(qns - Q)
expL = np.sum(expqns, axis=axis)
return np.log(expL) + Q, expqns / np.expand_dims(expL, axis)
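# Quick sanity check of hoggsumexp (comments only; shapes follow the docstring):
#   >>> qns = np.random.normal(size=(4, 7))
#   >>> L, dL = hoggsumexp(qns)    # L.shape == (4,), dL.shape == (4, 7)
# `dL` is the softmax of `qns` along the last axis, i.e. the gradient dL/dqns,
# so each row of `dL` sums to 1.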
class ImageModel:
def __init__(self, ns, xnqs):
self.N = int(np.max(ns)) + 1
self.ns = ns
self.xnqs = xnqs
self.initialize_bases()
self.create_angle_sampling()
print("image_model:", self.lnams.shape, self.ns.shape, self.xms.shape,
self.xnqs.shape)
def initialize_bases(self):
"""
Make the things you need for a grid of overlapping Gaussians.
# issues
- Magic numbers.
- The three-d model is actually two-d, which is cheating!!
"""
self.sigma = 3.0 # magic # 2.0
self.sigma2 = self.sigma ** 2
nyhalf, nxhalf = 16, 16 # magic # 12, 24
yms, xms = np.meshgrid(np.arange(2 * nyhalf + 1),
np.arange(2 * nxhalf + 1))
yms = (yms - nyhalf).flatten() * self.sigma # lots of magic
xms = (xms - nxhalf).flatten() * self.sigma
zms = np.zeros_like(yms) # this is cheating!!
self.M = len(yms)
self.xms = np.vstack((yms, xms, zms)).T
self.lnams = np.random.normal(size=yms.shape)
return None
def create_angle_sampling(self, T=2**10): # MAGIC 1024
"""
# issues
- Ought to re-draw yhats that have large dot products with xhats...
"""
self.T = T
self.rotations = np.zeros((T, 2, 3))
xhats = np.random.normal(size=3*T).reshape((T, 3))
xhats /= np.sqrt(np.sum(xhats**2, axis=1))[:, None]
yhats = np.random.normal(size=3*T).reshape((T, 3))
yhats -= np.sum(xhats*yhats, axis=1)[:,None] * xhats
yhats /= np.sqrt(np.sum(yhats**2, axis=1))[:, None]
self.rotations[:, 0, :] = xhats
self.rotations[:, 1, :] = yhats
return None
def evaluate_rotated_lnbases(self, xqs):
"""
# input:
- xqs: ndarray of shape [Q, 2]
# output:
- lngtqms: evaluations of shape [T, Q, M]
"""
Q, two = xqs.shape
assert two == 2
xtms = np.sum(self.rotations[:, None, :, :] * self.xms[None, :, None, :], axis=3)
print(xqs.shape, xtms.shape)
return -0.5 * np.sum((xqs[None, :, None, :] - xtms[:, None, :, :]) ** 2, axis=3) / self.sigma2 \
- np.log(2. * np.pi * self.sigma2)
def evaluate_lnbases(self, xfqs):
"""
# input:
- xfqs: ndarray of shape [foo, Q, 2]
# output:
- lngfqms: evaluations of shape [foo, Q, M]
"""
return -0.5 * np.sum((xfqs[:, :, None, :] - self.xms[None, None, :, :2]) ** 2, axis=3) / self.sigma2 \
- np.log(2. * np.pi * self.sigma2)
def pickle_to_file(self, fn):
fd = open(fn, "wb")
pickle.dump(self, fd)
fd.close()
print(fn)
return None
def plot(self, ax):
"""
Put a two-d image onto a matplotlib plot.
# issues:
- Magic numbers.
- Requires matplotlib (or the ducktype).
"""
f = 0.65 # magic
ys = np.arange(-self.sigma * f * np.sqrt(self.M), self.sigma * f * np.sqrt(self.M), 1) # magic
xs = np.arange(-self.sigma * f * np.sqrt(self.M), self.sigma * f * np.sqrt(self.M), 1) # magic
ys, xs = np.meshgrid(ys, xs)
ny, nx = ys.shape
xps = np.zeros((ny, nx, 2))
xps[:, :, 0] = ys
xps[:, :, 1] = xs
image = np.sum(np.exp(self.lnams[None, None, :] + self.evaluate_lnbases(xps)), axis=2) # unsafe
vmin = -0.75 * np.max(image)
ax.imshow(-image.T, interpolation="nearest", origin="lower", vmin=vmin, vmax=0.)
return None
def single_image_lnlike(self, n):
"""
# input:
- n: index of the image for which lnL should be computed
# output:
- lnLn, dlnLn_dlnams: lnL and its gradient wrt self.lnams
# issues:
- Not tested.
- Too many asserts!
- Is the penalty and its derivative correct? Hogg is suspicious.
"""
I = (self.ns == n)
Q = np.sum(I)
xqs = (self.xnqs[I]).reshape((Q, 2))
lngtqms = self.evaluate_rotated_lnbases(xqs)
assert lngtqms.shape == (self.T, Q, self.M)
# logsumexp over m index (ie, summing the mixture of Gaussians)
lnLntqs, dlnLntqs_dlnams = hoggsumexp(self.lnams[None, None, :] + lngtqms, axis=2)
assert lnLntqs.shape == (self.T, Q)
assert dlnLntqs_dlnams.shape == (self.T, Q, self.M)
# sum over q index (ie, product together all the photons in image n)
lnLnts = np.sum(lnLntqs, axis=1)
assert lnLnts.shape == (self.T, )
dlnLnts_dlnams = np.sum(dlnLntqs_dlnams, axis=1)
assert dlnLnts_dlnams.shape == (self.T, self.M)
# logsumexp over t index (ie, marginalize out the angles)
lnLn, dlnLn_dlnLnts = hoggsumexp(lnLnts, axis=0)
assert dlnLn_dlnLnts.shape == (self.T, )
dlnLn_dlnams = np.sum(dlnLn_dlnLnts[:, None] * dlnLnts_dlnams, axis=0)
assert dlnLn_dlnams.shape == (self.M, )
dpenalty_dlnams = np.exp(self.lnams)
penalty = np.sum(dpenalty_dlnams) # is this correct?
return lnLn - penalty, dlnLn_dlnams - dpenalty_dlnams
def test_hoggsumexp():
for shape in [(7, ), (3, 5, 9)]:
qns = np.random.normal(size=shape)
L, dL = hoggsumexp(qns)
if len(shape) == 3:
assert L.shape == (3, 5)
assert dL.shape == (3, 5, 9)
delta = 1e-5
if len(shape) == 1:
qns[3] += delta
else:
qns[2, 2, 4] += delta
L1, foo = hoggsumexp(qns)
if len(shape) == 1:
qns[3] -= 2. * delta
else:
qns[2, 2, 4] -= 2. * delta
L2, foo = hoggsumexp(qns)
if len(shape) == 1:
print("test_hoggsumexp():", dL[3], (L1 - L2) / (2. * delta))
else:
print("test_hoggsumexp():", dL[2, 2, 4], (L1 - L2) / (2. * delta))
return True
if __name__ == "__main__":
import matplotlib.pyplot as plt
photons = np.load('./photons.npy')
ns = photons[:, 0]
xnqs = photons[:, 1:]
# initialize model
model = ImageModel(ns, xnqs)
# check derivative
##Ln, gradLn = model.single_image_lnlike(0)
##delta = 1.e-5 # magic
##model.lnams[5] += delta
##Ln2, gradLn2 = model.single_image_lnlike(0)
##print(gradLn[5], (Ln2 - Ln) / delta)
# take a few gradient steps
fig = plt.figure()
sumh = 0.
hplot = 0.
for j in range(2 ** 16):
h = 4096. / (2048. + float(j)) # magic
sumh += h
n = np.random.randint(model.N)
Ln, gradLn = model.single_image_lnlike(n)
print("stochastic", j, h, n, Ln)
model.lnams += h * gradLn
# plot the output of the s.g.
if sumh > hplot:
hplot += 40.
pfn = "./model_{:06d}.pkl".format(j)
model.pickle_to_file(pfn)
plt.clf()
plt.gray()
ax = plt.gca()
model.plot(ax)
pfn = "./model_{:06d}.png".format(j)
plt.savefig(pfn)
print(pfn)
|
davidwhogg/DiffractionMicroscopy
|
code/toyproblems/twodimage.py
|
Python
|
mit
| 8,207
|
[
"Gaussian"
] |
773cf1577ce5f6dd08bde95a3f24d2b621defab8f156aaeba3513136366e5fdd
|
"""
Plots the 2D pressure field from an IBAMR simulation at saved time-steps using
the visualization software VisIt.
"""
from snake.ibamr.simulation import IBAMRSimulation
simulation = IBAMRSimulation()
body_name = 'flyingSnake2dAoA35ds004filledInside' # file name (no extension)
simulation.plot_field_contours_visit('pressure', (-1.0, 0.5),
body=body_name,
solution_folder='numericalSolution',
view=(-15.0, -15.0, 15.0, 15.0),
width=800)
|
barbagroup/cuIBM
|
external/snake-0.3/examples/ibamr/plotPressure.py
|
Python
|
mit
| 591
|
[
"VisIt"
] |
d7bf20f79c9df5a180124505d06182bafd92a8889538b70e2645ace7d2c171f1
|
from neo.core.baseneo import BaseNeo
class Unit(BaseNeo):
"""
A :class:`Unit` regroups all the :class:`SpikeTrain` objects that were emitted
by a neuron during a :class:`Block`. The spikes may come from different :class:`Segment` objects
within the :class:`Block`, so this object is not contained in the usual
:class:`Block`/:class:`Segment`/:class:`SpikeTrain` hierarchy.
A :class:`Unit` is linked to :class:`RecordingChannelGroup` objects from which it was detected.
With tetrodes, for instance, multiple channels may record the same unit.
This replaces the :class:`Neuron` class in the previous version of Neo.
*Usage*::
# Store the spike times from a pyramidal neuron recorded on channel 0
u = neo.Unit(name='pyramidal neuron')
# first segment
st1 = neo.SpikeTrain(times=[.01, 3.3, 9.3], units='sec')
u.spiketrains.append(st1)
# second segment
st2 = neo.SpikeTrain(times=[100.01, 103.3, 109.3], units='sec')
u.spiketrains.append(st2)
*Required attributes/properties*:
None
*Recommended attributes/properties*:
:name:
:description:
:file_origin:
*Container of*:
:class:`SpikeTrain`
:class:`Spike`
"""
def __init__(self, name=None, description=None, file_origin=None,
channel_indexes=None, **annotations):
"""Initialize a new neuronal Unit (spike source)"""
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
self.channel_indexes = channel_indexes
self.spiketrains = [ ]
self.spikes = [ ]
self.recordingchannelgroup = None
|
tkf/neo
|
neo/core/unit.py
|
Python
|
bsd-3-clause
| 1,869
|
[
"NEURON"
] |
6b3c269c889c14c00f6439ed3cecfa1cd87121f03b54b1e34337198522bef0e9
|
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from vPiP import *
from vPiP.generators.spiral import generateSpiral
Vpip = vPiP.Vpip
with Vpip() as p:
# p.setShowDrawing(True)
# p.setPlotting(False)
try:
d = 100.0
for x in range(100, 2500, 240):
p.moveTo(x, 100)
for j in generateSpiral(x, 100, 100, d, 1000, 2):
p.drawTo(j[0], j[1])
p.moveTo(x, 350)
for j in generateSpiral(x, 350, 100, d, 1000, 4):
p.drawTo(j[0], j[1])
p.moveTo(x, 590)
for j in generateSpiral(x, 590, 100, d, 1000, 8):
p.drawTo(j[0], j[1])
p.moveTo(x, 830)
for j in generateSpiral(x, 830, 100, d, 1000, 16):
p.drawTo(j[0], j[1])
d += 100.0
p.goHome()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("test1 main thread exception : %s" % exc_type)
traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
|
brianinnes/vPiP
|
python/test2.py
|
Python
|
apache-2.0
| 1,586
|
[
"Brian"
] |
9e6b34f7ffb49eff9e16f077d08fe059f21cadeabe84ca61b2b287bf167f564b
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# viz_gl.py
# Purpose: viz running LAMMPS simulation via GL tool in Pizza.py
# Syntax: viz_gl.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz snapshot every this many steps
# Nsteps = run for this many steps
from __future__ import print_function
import sys
sys.path.append("./pizza")
# parse command line
argv = sys.argv
if len(argv) != 4:
print("Syntax: viz_gl.py in.lammps Nfreq Nsteps")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
tkroot = None
try:
import Tkinter
except:
import tkinter as Tkinter
tkroot = Tkinter.Tk()
tkroot.withdraw()
from dump import dump
from gl import gl
d = dump("tmp.dump",0)
g = gl(d)
d.next()
d.unscale()
g.zoom(1)
g.shift(0,0)
g.rotate(0,270)
g.q(10)
g.box(1)
g.show(ntimestep)
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
d.next()
d.unscale()
g.show(ntimestep)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
|
Dixon3/lammps
|
python/examples/viz_gl.py
|
Python
|
gpl-2.0
| 1,952
|
[
"LAMMPS"
] |
7ffbcb8e4ddf667899808f5b0b3fcecc38ee1e502281def7b1c898ee85038545
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the high-level Fisher estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.contrib.kfac.python.ops import placement
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# The linter is confused.
# pylint: disable=abstract-class-instantiated
def make_fisher_estimator(placement_strategy=None, **kwargs):
"""Creates Fisher estimator instances based on the placement strategy.
For example if the `placement_strategy` is 'round_robin' then
`FisherEstimatorRoundRobin` instance is returned.
Args:
placement_strategy: `string`, Strategy to be used for placing covariance
variables, covariance ops and inverse ops. Check
`placement.FisherEstimatorRoundRobin` for a concrete example.
**kwargs: Arguments to be passed into `FisherEstimator` class initializer.
Returns:
An instance of class which inherits from `FisherEstimator` and the mixin
which implements specific placement strategy. See,
`FisherEstimatorRoundRobin` which inherits from `FisherEstimator` and
`RoundRobinPlacementMixin`.
Raises:
ValueError: If the `placement_strategy` is not equal to 'round_robin'.
"""
if placement_strategy in [None, "round_robin"]:
return FisherEstimatorRoundRobin(**kwargs)
else:
raise ValueError("Unimplemented vars and ops "
"placement strategy : {}".format(placement_strategy))
# pylint: enable=abstract-class-instantiated
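# Hypothetical usage sketch of the factory above (comments only; `model_vars`,
# `layers` and the hyperparameter values are placeholders, not part of this module):
#   est = make_fisher_estimator(
#       placement_strategy="round_robin",
#       variables=model_vars,          # or a callable returning them
#       cov_ema_decay=0.95,
#       damping=1e-3,
#       layer_collection=layers)
#   (cov_var_thunks, cov_update_thunks,
#    inv_var_thunks, inv_update_thunks) = est.create_ops_and_vars_thunks()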
@six.add_metaclass(abc.ABCMeta)
class FisherEstimator(object):
"""Fisher estimator class supporting various approximations of the Fisher.
This is an abstract base class which does not implement a strategy for
placing covariance variables, covariance update ops and inverse update ops.
The placement strategies are implemented in `placement.py`. See
`FisherEstimatorRoundRobin` for an example of a concrete subclass with
a round-robin placement strategy.
"""
def __init__(self,
variables,
cov_ema_decay,
damping,
layer_collection,
exps=(-1,),
estimation_mode="gradients",
colocate_gradients_with_ops=True,
name="FisherEstimator",
compute_cholesky=False,
compute_cholesky_inverse=False):
"""Create a FisherEstimator object.
Args:
variables: A `list` of variables or `callable` which returns the variables
for which to estimate the Fisher. This must match the variables
registered in layer_collection (if it is not None).
cov_ema_decay: The decay factor used when calculating the covariance
estimate moving averages.
damping: float. The damping factor used to stabilize training due to
errors in the local approximation with the Fisher information matrix,
and to regularize the update direction by making it closer to the
gradient. (Higher damping means the update looks more like a standard
gradient update - see Tikhonov regularization.)
layer_collection: The layer collection object, which holds the Fisher
blocks, Kronecker factors, and losses associated with the
graph.
exps: List of floats or ints. These represent the different matrix
powers of the approximate Fisher that the FisherEstimator will be able
to multiply vectors by. If the user asks for a matrix power other than
one of these (or 1, which is always supported), there will be a
failure. (Default: (-1,))
estimation_mode: The type of estimator to use for the Fishers. Can be
'gradients', 'empirical', 'curvature_prop', or 'exact'.
(Default: 'gradients'). 'gradients' is the basic estimation approach
from the original K-FAC paper. 'empirical' computes the 'empirical'
Fisher information matrix (which uses the data's distribution for the
targets, as opposed to the true Fisher which uses the model's
distribution) and requires that each registered loss have specified
targets. 'curvature_propagation' is a method which estimates the
Fisher using self-products of random 1/-1 vectors times "half-factors"
of the Fisher, as described here: https://arxiv.org/abs/1206.6464 .
Finally, 'exact' is the obvious generalization of Curvature
Propagation to compute the exact Fisher (modulo any additional
diagonal or Kronecker approximations) by looping over one-hot vectors
for each coordinate of the output instead of using 1/-1 vectors. It
is more expensive to compute than the other three options by a factor
equal to the output dimension, roughly speaking.
colocate_gradients_with_ops: Whether we should request gradients be
colocated with their respective ops. (Default: True)
name: A string. A name given to this estimator, which is added to the
variable scope when constructing variables and ops.
(Default: "FisherEstimator")
compute_cholesky: Bool. Whether or not the FisherEstimator will be
able to multiply vectors by the Cholesky factor.
(Default: False)
compute_cholesky_inverse: Bool. Whether or not the FisherEstimator
will be able to multiply vectors by the Cholesky factor inverse.
(Default: False)
Raises:
ValueError: If no losses have been registered with layer_collection.
"""
self._variables = variables
self._cov_ema_decay = cov_ema_decay
self._damping = damping
self._estimation_mode = estimation_mode
self._layers = layer_collection
self._gradient_fns = {
"gradients": self._get_grads_lists_gradients,
"empirical": self._get_grads_lists_empirical,
"curvature_prop": self._get_grads_lists_curvature_prop,
"exact": self._get_grads_lists_exact
}
self._colocate_gradients_with_ops = colocate_gradients_with_ops
self._made_vars = False
self._exps = exps
self._compute_cholesky = compute_cholesky
self._compute_cholesky_inverse = compute_cholesky_inverse
self._name = name
@property
def variables(self):
if callable(self._variables):
return self._variables()
else:
return self._variables
@property
def damping(self):
return self._damping
@property
def blocks(self):
"""All registered FisherBlocks."""
return self._layers.get_blocks()
@property
def factors(self):
"""All registered FisherFactors."""
return self._layers.get_factors()
@property
def name(self):
return self._name
@abc.abstractmethod
def make_vars_and_create_op_thunks(self, scope=None):
"""Make vars and create op thunks with a specific placement strategy.
For each factor, all of that factor's cov variables and their associated
update ops will be placed on a particular device. A new device is chosen
for each factor by cycling through list of devices in the cov_devices
argument. If cov_devices is None then no explicit device placement occurs.
An analogous strategy is followed for inverse update ops, with the list of
devices being given by the inv_devices argument.
Inverse variables on the other hand are not placed on any specific device
(they will just use the current device placement context, whatever
that happens to be). The idea is that the inverse variables belong where
they will be accessed most often, which is the device that actually applies
the preconditioner to the gradient. The user will be responsible for setting
the device context for this.
Args:
scope: A string or None. If None it will be set to the name of this
estimator (given by the name property). All variables will be created,
and all thunks will execute, inside of a variable scope of the given
name. (Default: None)
Returns:
cov_update_thunks: List of cov update thunks. Corresponds one-to-one with
the list of factors given by the "factors" property.
inv_update_thunks: List of inv update thunks. Corresponds one-to-one with
the list of factors given by the "factors" property.
"""
pass
def _apply_transformation(self, vecs_and_vars, transform):
"""Applies an block-wise transformation to the corresponding vectors.
Args:
vecs_and_vars: List of (vector, variable) pairs.
transform: A function of the form f(fb, vec), where vec is the vector
to transform and fb is its corresponding block in the matrix, that
returns the transformed vector.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
vecs = utils.SequenceDict((var, vec) for vec, var in vecs_and_vars)
trans_vecs = utils.SequenceDict()
for params, fb in self._layers.fisher_blocks.items():
trans_vecs[params] = transform(fb, vecs[params])
return [(trans_vecs[var], var) for _, var in vecs_and_vars]
def multiply_inverse(self, vecs_and_vars):
"""Multiplies the vecs by the corresponding (damped) inverses of the blocks.
Args:
vecs_and_vars: List of (vector, variable) pairs.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
return self.multiply_matpower(-1, vecs_and_vars)
def multiply(self, vecs_and_vars):
"""Multiplies the vectors by the corresponding (damped) blocks.
Args:
vecs_and_vars: List of (vector, variable) pairs.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
return self.multiply_matpower(1, vecs_and_vars)
def multiply_matpower(self, exp, vecs_and_vars):
"""Multiplies the vecs by the corresponding matrix powers of the blocks.
Args:
exp: A float representing the power to raise the blocks by before
multiplying it by the vector.
vecs_and_vars: List of (vector, variable) pairs.
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
assert exp in self._exps
fcn = lambda fb, vec: fb.multiply_matpower(vec, exp)
return self._apply_transformation(vecs_and_vars, fcn)
def multiply_cholesky(self, vecs_and_vars, transpose=False):
"""Multiplies the vecs by the corresponding Cholesky factors.
Args:
vecs_and_vars: List of (vector, variable) pairs.
transpose: Bool. If true the Cholesky factors are transposed before
multiplying the vecs. (Default: False)
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
assert self._compute_cholesky
fcn = lambda fb, vec: fb.multiply_cholesky(vec, transpose=transpose)
return self._apply_transformation(vecs_and_vars, fcn)
def multiply_cholesky_inverse(self, vecs_and_vars, transpose=False):
"""Mults the vecs by the inverses of the corresponding Cholesky factors.
Note: if you are using Cholesky inverse multiplication to sample from
a matrix-variate Gaussian you will want to multiply by the transpose.
Let L be the Cholesky factor of F and observe that
L^-T * L^-1 = (L * L^T)^-1 = F^-1 .
Thus we want to multiply by L^-T in order to sample from Gaussian with
covariance F^-1.
Args:
vecs_and_vars: List of (vector, variable) pairs.
transpose: Bool. If true the Cholesky factor inverses are transposed
before multiplying the vecs. (Default: False)
Returns:
A list of (transformed vector, var) pairs in the same order as
vecs_and_vars.
"""
assert self._compute_cholesky_inverse
fcn = lambda fb, vec: fb.multiply_cholesky_inverse(vec, transpose=transpose)
return self._apply_transformation(vecs_and_vars, fcn)
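# Small numpy check of the identity quoted in the docstring above (comments only;
# F is an arbitrary SPD matrix, not anything computed by this class):
#   >>> F = np.array([[2., 0.5], [0.5, 1.]])
#   >>> L = np.linalg.cholesky(F)                                # F = L L^T
#   >>> Linv = np.linalg.inv(L)
#   >>> np.allclose(np.dot(Linv.T, Linv), np.linalg.inv(F))     # L^-T L^-1 == F^-1
#   True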
def _instantiate_factors(self):
"""Instantiates FisherFactors' variables.
Raises:
ValueError: If estimation_mode was improperly specified at construction.
"""
blocks = self.blocks
tensors_to_compute_grads = [
block.tensors_to_compute_grads() for block in blocks
]
try:
grads_lists = self._gradient_fns[self._estimation_mode](
tensors_to_compute_grads)
except KeyError:
raise ValueError("Unrecognized value {} for estimation_mode.".format(
self._estimation_mode))
for grads_list, block in zip(grads_lists, blocks):
block.instantiate_factors(grads_list, self.damping)
def _check_vars_unmade_and_set_made_flag(self):
if self._made_vars:
raise Exception("Already made variables.")
self._made_vars = True
def made_vars(self):
return self._made_vars
def _register_matrix_functions(self):
for block in self.blocks:
for exp in self._exps:
block.register_matpower(exp)
if self._compute_cholesky:
block.register_cholesky()
if self._compute_cholesky_inverse:
block.register_cholesky_inverse()
def _finalize_layer_collection(self):
self._layers.create_subgraph()
self._layers.check_registration(self.variables)
self._instantiate_factors()
self._register_matrix_functions()
def create_ops_and_vars_thunks(self, scope=None):
"""Create thunks that make the ops and vars on demand.
This function returns 4 lists of thunks: cov_variable_thunks,
cov_update_thunks, inv_variable_thunks, and inv_update_thunks.
The length of each list is the number of factors and the i-th element of
each list corresponds to the i-th factor (given by the "factors" property).
Note that the execution of these thunks must happen in a certain
partial order. The i-th element of cov_variable_thunks must execute
before the i-th element of cov_update_thunks (and also the i-th element
of inv_update_thunks). Similarly, the i-th element of inv_variable_thunks
must execute before the i-th element of inv_update_thunks.
TL;DR (oversimplified): Execute the thunks according to the order that
they are returned.
Args:
scope: A string or None. If None it will be set to the name of this
estimator (given by the name property). All thunks will execute inside
of a variable scope of the given name. (Default: None)
Returns:
cov_variable_thunks: A list of thunks that make the cov variables.
cov_update_thunks: A list of thunks that make the cov update ops.
inv_variable_thunks: A list of thunks that make the inv variables.
inv_update_thunks: A list of thunks that make the inv update ops.
"""
self._check_vars_unmade_and_set_made_flag()
self._finalize_layer_collection()
scope = self.name if scope is None else scope
cov_variable_thunks = [
self._create_cov_variable_thunk(factor, scope)
for factor in self.factors
]
cov_update_thunks = [
self._create_cov_update_thunk(factor, scope) for factor in self.factors
]
inv_variable_thunks = [
self._create_inv_variable_thunk(factor, scope)
for factor in self.factors
]
inv_update_thunks = [
self._create_inv_update_thunk(factor, scope) for factor in self.factors
]
return (cov_variable_thunks, cov_update_thunks,
inv_variable_thunks, inv_update_thunks)
def _create_cov_variable_thunk(self, factor, scope):
"""Constructs a covariance variable thunk for a single FisherFactor."""
def thunk():
with variable_scope.variable_scope(scope):
return factor.instantiate_cov_variables()
return thunk
def _create_cov_update_thunk(self, factor, scope):
"""Constructs a covariance update thunk for a single FisherFactor."""
def thunk():
with variable_scope.variable_scope(scope):
return factor.make_covariance_update_op(self._cov_ema_decay)
return thunk
def _create_inv_variable_thunk(self, factor, scope):
"""Constructs a inverse variable thunk for a single FisherFactor."""
def thunk():
with variable_scope.variable_scope(scope):
return factor.instantiate_inv_variables()
return thunk
def _create_inv_update_thunk(self, factor, scope):
"""Constructs an inverse update thunk for a single FisherFactor."""
def thunk():
with variable_scope.variable_scope(scope):
return control_flow_ops.group(factor.make_inverse_update_ops())
return thunk
def _get_grads_lists_gradients(self, tensors):
# Passing in a list of loss values is better than passing in the sum as
# the latter creates unnecessary ops on the default device
grads_flat = gradients_impl.gradients(
self._layers.eval_losses_on_samples(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_grads_lists_empirical(self, tensors):
# Passing in a list of loss values is better than passing in the sum as
# the latter creates unnecessary ops on the default device
grads_flat = gradients_impl.gradients(
self._layers.eval_losses(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_transformed_random_signs(self):
transformed_random_signs = []
for loss in self._layers.losses:
with tf_ops.colocate_with(self._layers.loss_colocation_ops[loss]):
transformed_random_signs.append(
loss.multiply_fisher_factor(
utils.generate_random_signs(loss.fisher_factor_inner_shape)))
return transformed_random_signs
def _get_grads_lists_curvature_prop(self, tensors):
loss_inputs = list(loss.inputs for loss in self._layers.losses)
transformed_random_signs = self._get_transformed_random_signs()
grads_flat = gradients_impl.gradients(
nest.flatten(loss_inputs),
nest.flatten(tensors),
grad_ys=nest.flatten(transformed_random_signs),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_grads_lists_exact(self, tensors):
"""No docstring required."""
# Loop over all coordinates of all losses.
grads_all = []
for loss in self._layers.losses:
with tf_ops.colocate_with(self._layers.loss_colocation_ops[loss]):
for index in np.ndindex(*loss.fisher_factor_inner_static_shape[1:]):
transformed_one_hot = loss.multiply_fisher_factor_replicated_one_hot(
index)
grads_flat = gradients_impl.gradients(
loss.inputs,
nest.flatten(tensors),
grad_ys=transformed_one_hot,
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all.append(nest.pack_sequence_as(tensors, grads_flat))
return zip(*grads_all)
class FisherEstimatorRoundRobin(placement.RoundRobinPlacementMixin,
FisherEstimator):
"""Fisher estimator which provides round robin device placement strategy."""
pass
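# Illustrative, standalone sketch (a hypothetical helper, not part of the
# estimator API above): the multiply_cholesky_inverse docstring relies on the
# identity L^-T L^-1 = (L L^T)^-1 = F^-1, so transforming standard-normal
# noise by L^-T yields samples whose covariance is F^-1. A minimal NumPy
# check of that identity, assuming only a small SPD stand-in for the damped
# Fisher matrix:
def _cholesky_inverse_sampling_sketch():
  import numpy as np  # local import keeps the sketch self-contained
  rng = np.random.RandomState(0)
  a = rng.randn(4, 4)
  fisher = a.dot(a.T) + 4.0 * np.eye(4)   # damped, symmetric positive-definite
  chol = np.linalg.cholesky(fisher)       # fisher = chol.dot(chol.T)
  noise = rng.randn(4, 200000)            # i.i.d. standard-normal vectors
  samples = np.linalg.solve(chol.T, noise)  # samples = L^-T noise
  empirical_cov = samples.dot(samples.T) / noise.shape[1]
  # the empirical covariance approaches F^-1 as the number of samples grows
  assert np.allclose(empirical_cov, np.linalg.inv(fisher), atol=5e-2)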
|
ZhangXinNan/tensorflow
|
tensorflow/contrib/kfac/python/ops/estimator.py
|
Python
|
apache-2.0
| 20,349
|
[
"Gaussian"
] |
4f95d4487dc2da9393085940310ed7f5294696ca1d8048852c59f353d25ea8b3
|
import os.path as osp
import pickle
import time
import numpy as np
from mastic.selection import CoordArray
from mastic.molecule import Atom, Bond
# must set this to pickle substantiated systems sometimes
#sys.setrecursionlimit(100000)
# load the system type pickle in
inputs_path = osp.realpath("../../examples/sEH-TPPU")
system_pkl_path = osp.join(inputs_path, "sEH_TPPU_SystemType.pkl")
with open(system_pkl_path, 'rb') as rf:
seh_tppu_System = pickle.load(rf)
# load the rdkit wrapper pickles and the coordinates
TPPU_rdkit_pkl_path = osp.join(".", "TPPU_rdkit.pkl")
seh_rdkit_pkl_path = osp.join(".", "sEH_rdkit.pkl")
TPPU_coords_path = osp.join(".", "TPPU.npy")
seh_coords_path = osp.join(".", "sEH.npy")
with open(TPPU_rdkit_pkl_path, 'rb') as rf:
TPPU_rdkit_wrapper = pickle.load(rf)
with open(seh_rdkit_pkl_path, 'rb') as rf:
seh_rdkit_wrapper = pickle.load(rf)
TPPU_coords = np.load(TPPU_coords_path)
seh_coords = np.load(seh_coords_path)
member_coords = [TPPU_coords, seh_coords]
seh_type = seh_tppu_System.member_types[1]
# make atoms for the whole molecule
coord_array = CoordArray(seh_coords)
atoms = []
for atom_idx, atom_type in enumerate(seh_type.atom_types):
atom = Atom(atom_array=coord_array, array_idx=atom_idx, atom_type=atom_type)
atoms.append(atom)
bond_idx = 0
bond_type = seh_type.bond_types[bond_idx]
atom_ids = seh_type.bond_map[bond_idx]
print("Making the Bond")
start = time.time()
bond = Bond(atom_container=atoms, atom_ids=atom_ids,
bond_type=bond_type)
end = time.time()
print("Time for creating an sEH bond was {} seconds".format(end-start))
|
salotz/mast
|
prototypes/profile_system/profile_bond_creation.py
|
Python
|
mit
| 1,629
|
[
"RDKit"
] |
9c287c5f2604f10984965936dcb87f8627c91cdb4090979026ba61608a4abb48
|
#
# Copyright 2014 Flytxt
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import numpy as np
from operator import add
from scipy.misc import logsumexp
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.clustering import KMeans
class GMMclustering:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s %(message)s')
def fit(self, data, n_components, n_iter, ct):
"""
Estimate model parameters with the expectation-maximization
algorithm.
Parameters
----------
data - RDD of data points
n_components - Number of mixture components
n_iter - Number of EM iterations. Defaults to 100
ct - Convergence threshold on the change in total log-likelihood.
Defaults to 1e-3
Attributes
----------
covariance_type : Type of covariance matrix.
Only diagonal covariance matrices are supported.
min_covar : Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
converged : True once converged, False otherwise.
Weights : array of shape (1, n_components)
Weights for each mixture component.
Means : array of shape (n_components, n_dim)
Mean parameters for each mixture component.
Covars : array of shape (n_components, n_dim)
Covariance parameters for each mixture component.
"""
sc = data.context
covariance_type = 'diag'
converged = False
self.min_covar = 1e-3
# observation statistics
self.s0 = 0
self.s1 = 0
# To get the no of data points
n_points = data.count()
# To get the no of dimensions
n_dim = data.first().size
if (n_points == 0):
raise ValueError(
'Dataset cannot be empty')
if (n_points < n_components):
raise ValueError(
'Not possible to make (%s) components from (%s) datapoints' %
(n_components, n_points))
# Initialize Covars(diagonal covariance matrix)
if hasattr(data.first(), 'indices'):
self.isSparse = 1
def convert_to_kvPair(eachV):
g = []
for i in range(eachV.indices.size):
g.append((eachV.indices[i],
(eachV.values[i], eachV.values[i]*eachV.values[i])))
return g
def computeVariance(x):
mean = x[1][0]/n_points
sumSq = x[1][1]/n_points
return x[0], sumSq - mean*mean
cov = []
kvPair = data.flatMap(convert_to_kvPair)
res = kvPair.reduceByKey(np.add).map(computeVariance)
cov = Vectors.sparse(n_dim, res.collectAsMap()).toArray() + 1e-3
self.Covars = np.tile(cov, (n_components, 1))
else:
self.isSparse = 0
cov = []
for i in range(n_dim):
cov.append(data.map(lambda m: m[i]).variance()+self.min_covar)
self.Covars = np.tile(cov, (n_components, 1))
# Initialize Means using MLlib KMeans
self.Means = np.array(KMeans().train(data, n_components).clusterCenters)
# Initialize Weights with the value 1/n_components for each component
self.Weights = np.tile(1.0 / n_components, n_components)
# EM algorithm
# loop until number of iterations or convergence criteria is satisfied
for i in range(n_iter):
logging.info("GMM running iteration %s " % i)
# broadcasting means,covars and weights
self.meansBc = sc.broadcast(self.Means)
self.covarBc = sc.broadcast(self.Covars)
self.weightBc = sc.broadcast(self.Weights)
# Expectation Step
EstepOut = data.map(self.scoreOnePoint)
# Maximization step
MstepIn = EstepOut.reduce(lambda a, b:
(a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]))
self.s0 = self.s1
self.mStep(MstepIn[0], MstepIn[1], MstepIn[2], MstepIn[3])
# Check for convergence.
if i > 0 and abs(self.s1-self.s0) < ct:
converged = True
logging.info("Converged at iteration %s" % i)
break
return self
def scoreOnePoint(self, x):
"""
Compute the log likelihood of 'x' being generated under the current model
Also returns the probability that 'x' is generated by each component of the mixture
Parameters
----------
x : array of shape (1, n_dim)
Corresponds to a single data point.
Returns
-------
log_likelihood_x :Log likelihood of 'x'
prob_x : Responsibility of each cluster for the data point 'x'
"""
lpr = (self.log_multivariate_normal_density_diag_Nd(x) + np.log(self.Weights))
log_likelihood_x = logsumexp(lpr)
prob_x = np.exp(lpr-log_likelihood_x)
if self.isSparse == 1:
temp_wt = np.dot(prob_x[:, np.newaxis], x.toArray()[np.newaxis, :])
sqVec = Vectors.sparse(x.size, x.indices, x.values**2)
temp_avg = np.dot(prob_x.T[:, np.newaxis], sqVec.toArray()[np.newaxis, :])
else:
temp_wt = np.dot(prob_x.T[:, np.newaxis], x[np.newaxis, :])
temp_avg = np.dot(prob_x.T[:, np.newaxis], (x*x)[np.newaxis, :])
return log_likelihood_x, prob_x, temp_wt, temp_avg
def log_multivariate_normal_density_diag_Nd(self, x):
"""
Compute Gaussian log-density at x for a diagonal model
"""
n_features = x.size
if self.isSparse == 1:
t = Vectors.sparse(x.size, x.indices, x.values**2).dot((1/self.covarBc.value).T)
else:
t = np.dot(x**2, (1/self.covarBc.value).T)
lpr = -0.5 * (n_features*np.log(2*np.pi) + np.sum(np.log(self.covarBc.value), 1) +
np.sum((self.meansBc.value ** 2) / self.covarBc.value, 1)
- 2 * x.dot((self.meansBc.value/self.covarBc.value).T) + t)
return lpr
def mStep(self, log_sum, prob_sum, weighted_X_sum, weighted_X2_sum):
"""
Perform the Mstep of the EM algorithm.
Updates Means, Covars and Weights using observation statistics
"""
self.s1 = log_sum
inverse_prob_sum = 1.0 / (prob_sum)
self.Weights = (prob_sum / (prob_sum.sum()))
self.Means = (weighted_X_sum * inverse_prob_sum.T[:, np.newaxis])
self.Covars = ((weighted_X2_sum * inverse_prob_sum.T[:, np.newaxis]) - (self.Means**2)
+ self.min_covar)
def predict(self, x):
"""
Predicts the cluster to which the given instance belongs,
based on the maximum responsibility.
Parameters
----------
x : array of shape (1, n_dim)
Corresponds to a single data point.
Returns
-------
responsibility_matrix : membership values of x in each cluster component
"""
if hasattr(x, 'indices'):
self.isSparse = 1
else:
self.isSparse = 0
lpr = (self.log_multivariate_normal_density_diag_Nd(x) + np.log(self.Weights))
log_likelihood_x = logsumexp(lpr)
prob_x = np.exp(lpr-log_likelihood_x)
responsibility_matrix = np.array(prob_x)
return responsibility_matrix
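# Illustrative, standalone sketch (a hypothetical helper, not part of the
# class above): the expanded diagonal-Gaussian log-density computed in
# log_multivariate_normal_density_diag_Nd is algebraically identical to the
# naive per-component formula, and the responsibilities in scoreOnePoint are
# just a log-space (logsumexp) normalisation of those log-densities.
def _diag_gaussian_logdensity_sketch():
    import numpy as np  # local import keeps the sketch self-contained
    rng = np.random.RandomState(0)
    n_components, n_dim = 3, 5
    means = rng.randn(n_components, n_dim)
    covars = rng.rand(n_components, n_dim) + 0.5      # diagonal variances
    weights = np.ones(n_components) / n_components
    x = rng.randn(n_dim)
    # expanded form, mirroring log_multivariate_normal_density_diag_Nd
    t = np.dot(x ** 2, (1.0 / covars).T)
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1) +
                  np.sum((means ** 2) / covars, 1) -
                  2 * np.dot(x, (means / covars).T) + t) + np.log(weights)
    # naive per-component reference: log N(x | mean_k, diag(covar_k)) + log w_k
    ref = np.array([-0.5 * np.sum(np.log(2 * np.pi * covars[k]) +
                                  (x - means[k]) ** 2 / covars[k])
                    for k in range(n_components)]) + np.log(weights)
    assert np.allclose(lpr, ref)
    # responsibilities: normalise in log space, as scoreOnePoint does
    log_likelihood_x = np.max(lpr) + np.log(np.sum(np.exp(lpr - np.max(lpr))))
    prob_x = np.exp(lpr - log_likelihood_x)
    assert np.isclose(prob_x.sum(), 1.0)
    return log_likelihood_x, prob_x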
|
himanshu14/GMM
|
GMMclustering.py
|
Python
|
apache-2.0
| 8,344
|
[
"Gaussian"
] |
d111ed2073c6b752379ba879660f517d9819b1e1a04b5b7ed50fa0cac1b354b5
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "worker" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
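# Illustrative, standalone sketch (a hypothetical helper, not part of
# Versioneer): how git_pieces_from_vcs above decomposes a
# TAG-NUM-gHEX[-dirty] string produced by `git describe --tags --dirty
# --always --long`.
def _example_parse_describe():
    git_describe = "v1.2-3-gabcdef1-dirty"   # assumed example output
    dirty = git_describe.endswith("-dirty")
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
    # closest tag "v1.2", 3 commits since that tag, short hex "abcdef1"
    return {"closest-tag": mo.group(1), "distance": int(mo.group(2)),
            "short": mo.group(3), "dirty": dirty}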
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns a dict with 'version', 'full-revisionid', 'dirty', 'error' and 'date' keys.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
jtwhite79/pyemu
|
versioneer.py
|
Python
|
bsd-3-clause
| 68,612
|
[
"Brian"
] |
e5d1bd168335fa5e80818c932767e526d0e58b159146e288c1810ab7fd2fb359
|
from __main__ import vtk, qt, ctk, slicer
import numpy
import copy
from math import *
from slicer.ScriptedLoadableModule import *
import os
import pickle
import time
from slicer.util import VTKObservationMixin
class ModelAddedClass(VTKObservationMixin):
def __init__(self, anglePlanes):
VTKObservationMixin.__init__(self)
self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeAddedEvent, self.nodeAddedCallback)
self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeRemovedEvent, self.nodeRemovedCallback)
self.anglePlanes = anglePlanes
@vtk.calldata_type(vtk.VTK_OBJECT)
def nodeAddedCallback(self, caller, eventId, callData):
if isinstance(callData, slicer.vtkMRMLModelNode):
callData.AddObserver(callData.DisplayModifiedEvent, self.anglePlanes.onChangeModelDisplay)
self.addObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
self.anglePlanes.updateOnSurfaceCheckBoxes()
@vtk.calldata_type(vtk.VTK_OBJECT)
def nodeRemovedCallback(self, caller, eventId, callData):
if isinstance(callData, slicer.vtkMRMLModelNode):
self.removeObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
callData.RemoveObservers(callData.DisplayModifiedEvent)
self.anglePlanes.removeModelPointLocator(callData.GetName())
self.anglePlanes.updateOnSurfaceCheckBoxes()
if isinstance(callData, slicer.vtkMRMLMarkupsFiducialNode):
name = callData.GetName()
planeid = name[len('P'):]
name = "Plane " + planeid
if name in self.anglePlanes.planeControlsDictionary.keys():
self.anglePlanes.RemoveManualPlane(planeid)
def onModelNodePolyDataModified(self, caller, eventId):
self.anglePlanes.addModelPointLocator(caller.GetName(), caller.GetPolyData())
class AnglePlanesMiddleFiducial():
def __init__(self, P1, P2, onSurface, nodeID):
self.P1 = P1
self.P2 = P2
self.onSurface = onSurface
self.nodeID = nodeID
class AnglePlanes(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
parent.title = "Angle Planes"
parent.categories = ["Shape Analysis"]
parent.dependencies = []
parent.contributors = ["Julia Lopinto", "Juan Carlos Prieto", "Francois Budin"]
parent.helpText = """
This module calculates the angle between two planes by using their normals.
The user can either choose two of the slice planes already provided by Slicer
or define a plane by using landmarks (at least 3 landmarks).
Planes can also be saved so they can be reused with other models.
This is an alpha version of the module.
It cannot be used for the moment.
"""
parent.acknowledgementText = """
This work was supported by the National
Institutes of Dental and Craniofacial Research
and Biomedical Imaging and Bioengineering of
the National Institutes of Health under Award
Number R01DE024450.
"""
self.parent = parent
class AnglePlanesWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.moduleName = "AnglePlanes"
self.i = 0
self.logic = AnglePlanesLogic()
self.planeControlsId = 0
self.planeControlsDictionary = {}
#self.midPointFiducialDictionaryID = {}
# self.logic.initializePlane()
self.ignoredNodeNames = ('Red Volume Slice', 'Yellow Volume Slice', 'Green Volume Slice')
self.colorSliceVolumes = dict()
self.n_vector = numpy.matrix([[0], [0], [1], [1]])
self.interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
# Definition of the 2 planes
# Collapsible button -- Scene Description
self.loadCollapsibleButton = ctk.ctkCollapsibleButton()
self.loadCollapsibleButton.text = "Scene"
self.layout.addWidget(self.loadCollapsibleButton)
# Layout within the laplace collapsible button
self.loadFormLayout = qt.QFormLayout(self.loadCollapsibleButton)
#--------------------------- List of Models --------------------------#
treeView = slicer.qMRMLTreeView()
treeView.setMRMLScene(slicer.app.mrmlScene())
treeView.setSceneModelType('Displayable')
treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
header = treeView.header()
header.setResizeMode(0, qt.QHeaderView.Stretch)
header.setVisible(True)
self.loadFormLayout.addWidget(treeView)
self.autoChangeLayout = qt.QCheckBox()
self.autoChangeLayout.setCheckState(qt.Qt.Checked)
self.autoChangeLayout.setTristate(False)
self.autoChangeLayout.setText("Automatically change layout to 3D only")
self.loadFormLayout.addWidget(self.autoChangeLayout)
# Add vertical spacer
self.layout.addStretch(1)
#------------------------ Compute Bounding Box ----------------------#
buttonFrameBox = qt.QFrame(self.parent)
buttonFrameBox.setLayout(qt.QHBoxLayout())
self.loadFormLayout.addWidget(buttonFrameBox)
self.computeBox = qt.QPushButton("Compute Bounding Box around all models")
buttonFrameBox.layout().addWidget(self.computeBox)
self.computeBox.connect('clicked()', self.onComputeBox)
self.computeBox.setDisabled(True)
self.CollapsibleButton = ctk.ctkCollapsibleButton()
self.CollapsibleButton.text = "Manage planes"
self.layout.addWidget(self.CollapsibleButton)
self.managePlanesFormLayout = qt.QFormLayout(self.CollapsibleButton)
self.CollapsibleButton.checked = True
# Add planes and manage landmark addition to each plane
addNewPlaneLayout = qt.QHBoxLayout()
addPlaneLabel = qt.QLabel('Add new plane')
self.addPlaneButton = qt.QPushButton(qt.QIcon(":/Icons/MarkupsAddFiducial.png"), " ")
self.addPlaneButton.setFixedSize(50, 25)
self.addPlaneButton.connect('clicked()', self.addNewPlane)
self.addPlaneButton.setEnabled(True)
addNewPlaneLayout.addWidget(addPlaneLabel)
addNewPlaneLayout.addWidget(self.addPlaneButton)
self.managePlanesFormLayout.addRow(addNewPlaneLayout)
# ----------------- Compute Mid Point -------------
self.midPointGroupBox = ctk.ctkCollapsibleButton()
landmark1Layout = qt.QFormLayout()
self.midPointGroupBox.setText('Define middle point between two landmarks')
self.midPointGroupBox.collapsed = True
self.parent.layout().addWidget(self.midPointGroupBox)
self.selectPlaneForMidPoint = qt.QComboBox()
self.selectPlaneForMidPoint.connect('currentIndexChanged(int)', self.onChangeMiddlePointFiducialNode)
landmark1Layout.addRow('Choose plane: ', self.selectPlaneForMidPoint)
self.landmarkComboBox1MidPoint = qt.QComboBox()
self.landmarkComboBox2MidPoint = qt.QComboBox()
landmark1Layout.addRow('Landmark A: ', self.landmarkComboBox1MidPoint)
landmark1Layout.addRow('Landmark B: ', self.landmarkComboBox2MidPoint)
self.midPointOnSurfaceCheckBox = qt.QCheckBox('On Surface')
self.defineMiddlePointButton = qt.QPushButton(' Add middle point ')
self.defineRemoveMiddlePointButton = qt.QPushButton(' Remove middle point ')
middlePointLayout = qt.QHBoxLayout()
middlePointLayout.addWidget(self.defineMiddlePointButton)
middlePointLayout.addWidget(self.defineRemoveMiddlePointButton)
middlePointLayout.addWidget(self.midPointOnSurfaceCheckBox)
landmark1Layout.addRow(middlePointLayout)
self.midPointGroupBox.setLayout(landmark1Layout)
self.midPointGroupBox.setDisabled(True)
self.defineMiddlePointButton.connect('clicked()', self.onAddMidPoint)
self.defineRemoveMiddlePointButton.connect('clicked()', self.onRemoveMidPoint)
self.landmarkComboBox1MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
self.landmarkComboBox2MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
self.midPointOnSurfaceCheckBox.connect('stateChanged(int)', self.onSurfaceMidPointStateChanged)
# -------- Calculate angles between planes ------------
self.CollapsibleButtonPlane = ctk.ctkCollapsibleButton()
self.CollapsibleButtonPlane.text = "Choose planes"
self.layout.addWidget(self.CollapsibleButtonPlane)
sampleFormLayoutPlane = qt.QFormLayout(self.CollapsibleButtonPlane)
self.planeComboBox1 = qt.QComboBox()
self.fillColorsComboBox(self.planeComboBox1)
sampleFormLayoutPlane.addRow("Select plane 1: ", self.planeComboBox1)
self.planeComboBox2 = qt.QComboBox()
self.fillColorsComboBox(self.planeComboBox2)
sampleFormLayoutPlane.addRow("Select plane 2: ", self.planeComboBox2)
self.CollapsibleButton2 = ctk.ctkCollapsibleButton()
self.CollapsibleButton2.text = "Results"
self.layout.addWidget(self.CollapsibleButton2)
sampleFormLayout2 = qt.QFormLayout(self.CollapsibleButton2)
self.results = qt.QPushButton("Results")
self.results.connect('clicked()', self.angleValue)
sampleFormLayout2.addWidget(self.results)
label_RL = qt.QLabel("R-L View")
self.getAngle_RL = qt.QLabel("0")
label_SI = qt.QLabel("S-I View")
self.getAngle_SI = qt.QLabel("0")
label_AP = qt.QLabel("A-P View")
self.getAngle_AP = qt.QLabel("0")
self.getAngle_RL_comp = qt.QLabel("0")
self.getAngle_SI_comp = qt.QLabel("0")
self.getAngle_AP_comp = qt.QLabel("0")
tableResult = qt.QTableWidget(3, 3)
tableResult.setColumnCount(3)
tableResult.setHorizontalHeaderLabels([' View ', 'Angle', 'Complementary angle'])
tableResult.setColumnWidth(0, 80)
tableResult.setColumnWidth(1, 80)
tableResult.setColumnWidth(2, 180)
tableResult.setRowCount(1)
tableResult.setCellWidget(0, 0, label_RL)
tableResult.setCellWidget(0, 1, self.getAngle_RL)
tableResult.setCellWidget(0, 2, self.getAngle_RL_comp)
tableResult.setRowCount(2)
tableResult.setCellWidget(1, 0, label_SI)
tableResult.setCellWidget(1, 1, self.getAngle_SI)
tableResult.setCellWidget(1, 2, self.getAngle_SI_comp)
tableResult.setRowCount(3)
tableResult.setCellWidget(2, 0, label_AP)
tableResult.setCellWidget(2, 1, self.getAngle_AP)
tableResult.setCellWidget(2, 2, self.getAngle_AP_comp)
# Add vertical spacer
self.layout.addStretch(1)
sampleFormLayout2.addWidget(tableResult)
self.CollapsibleButton3 = ctk.ctkCollapsibleButton()
self.CollapsibleButton3.text = "Save"
self.layout.addWidget(self.CollapsibleButton3)
sampleFormLayout3 = qt.QFormLayout(self.CollapsibleButton3)
self.CollapsibleButton3.checked = False
buttonFrame = qt.QFrame(self.parent)
buttonFrame.setLayout(qt.QVBoxLayout())
sampleFormLayout3.addWidget(buttonFrame)
#-------------------------------- PLANES --------------------------------#
save_plane = qt.QLabel("Save the planes you create as a txt file.")
buttonFrame.layout().addWidget(save_plane)
save = qt.QPushButton("Save plane")
buttonFrame.layout().addWidget(save)
# load_plane = qt.QLabel("Load the file with the plane you saved.")
# buttonFrame.layout().addWidget(load_plane)
read = qt.QPushButton("Load plane")
buttonFrame.layout().addWidget(read)
#-------------------------------- CONNECTIONS --------------------------------#
self.planeComboBox1.connect('currentIndexChanged(QString)', self.valueComboBox)
self.planeComboBox2.connect('currentIndexChanged(QString)', self.valueComboBox)
# Set the combo boxes to different values/indices, otherwise an infinite loop occurs
self.planeComboBox1.setCurrentIndex(0)
self.planeComboBox2.setCurrentIndex(1)
self.valueComboBox()
save.connect('clicked(bool)', self.onSavePlanes)
read.connect('clicked(bool)', self.onReadPlanes)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
self.pointLocatorDictionary = {}
for i in self.getPositionOfModelNodes(False):
modelnode = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
self.addModelPointLocator(modelnode.GetName(), modelnode.GetPolyData())
modelnode.AddObserver(modelnode.DisplayModifiedEvent, self.onChangeModelDisplay)
self.middleFiducialDictionary = dict()
ModelAddedClass(self)
self.onUpdateMidPoint()
def canAddMiddlePoint(self):
if self.landmarkComboBox1MidPoint.currentText == self.landmarkComboBox2MidPoint.currentText\
or self.landmarkComboBox1MidPoint.count == 0 or self.landmarkComboBox2MidPoint.count == 0:
return False
else:
return True
def onUpdateMidPoint(self, remove=False):
if self.currentMidPointExists(remove):
self.defineRemoveMiddlePointButton.setDisabled(False)
self.defineMiddlePointButton.setDisabled(True)
else:
self.defineRemoveMiddlePointButton.setDisabled(True)
self.defineMiddlePointButton.setDisabled(False)
disableMiddlePointSurfaceCheckbox = False
if not self.canAddMiddlePoint():
self.defineMiddlePointButton.setDisabled(True)
self.updateOnSurfaceCheckBoxes()
def onSurfaceMidPointStateChanged(self):
key = self.getCurrentMidPointFiducialStructure()
if key != '':
self.middleFiducialDictionary[key].onSurface = self.midPointOnSurfaceCheckBox.isChecked()
if self.selectPlaneForMidPoint.currentText in self.planeControlsDictionary.keys():
self.planeControlsDictionary[self.selectPlaneForMidPoint.currentText].update()
def onChangeMiddlePointFiducialNode(self):
for x in [self.landmarkComboBox1MidPoint, self.landmarkComboBox2MidPoint]:
current = x.currentText
x.clear()
node = self.selectedMiddlePointPlane()
if not node:
return
for i in range(0, node.GetNumberOfMarkups()):
x.addItem(node.GetNthFiducialLabel(i))
if x.findText(current) > -1:
x.setCurrentIndex(x.findText(current))
def onChangeModelDisplay(self, obj, event):
self.updateOnSurfaceCheckBoxes()
def fillColorsComboBox(self, planeComboBox):
planeComboBox.clear()
planeComboBox.addItem("Red")
planeComboBox.addItem("Yellow")
planeComboBox.addItem("Green")
try:
for x in self.planeControlsDictionary.keys():
if self.planeControlsDictionary[x].PlaneIsDefined():
planeComboBox.addItem(x)
except NameError:
dummy = None
def updateOnSurfaceCheckBoxes(self):
numberOfVisibleModels = len(self.getPositionOfModelNodes(True))
# if there are visible models, allow selecting "on surface" when placing new fiducials
if numberOfVisibleModels > 0:
self.computeBox.setDisabled(False)
if self.currentMidPointExists():
key = self.getCurrentMidPointFiducialStructure()
self.midPointOnSurfaceCheckBox.setDisabled(False)
self.midPointOnSurfaceCheckBox.setChecked(self.middleFiducialDictionary[key].onSurface)
else:
self.midPointOnSurfaceCheckBox.setChecked(False)
self.midPointOnSurfaceCheckBox.setDisabled(True)
for x in self.planeControlsDictionary.values():
x.surfaceDeplacementCheckBox.setDisabled(False)
# otherwise (no models, or none visible), disable "on surface" for new fiducials
else:
self.computeBox.setDisabled(True)
self.midPointOnSurfaceCheckBox.setDisabled(True)
self.midPointOnSurfaceCheckBox.setChecked(False)
for x in self.planeControlsDictionary.values():
x.surfaceDeplacementCheckBox.setChecked(False)
x.surfaceDeplacementCheckBox.setDisabled(True)
def getPositionOfModelNodes(self, onlyVisible):
numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
positionOfNodes = list()
for i in range(0, numNodes):
node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
if node.GetName() in self.ignoredNodeNames:
continue
if onlyVisible is True and node.GetDisplayVisibility() == 0:
continue
positionOfNodes.append(i)
return positionOfNodes
def enter(self):
if self.autoChangeLayout.isChecked():
lm = slicer.app.layoutManager()
self.currentLayout = lm.layout
lm.setLayout(4) # 3D-View
# Show manual planes
for planeControls in self.planeControlsDictionary.values():
if planeControls.PlaneIsDefined():
planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, planeControls.slideOpacity.value)
self.valueComboBox()
self.onComputeBox()
def exit(self):
# Remove hidden nodes that are created just for Angle Planes
for x in self.colorSliceVolumes.values():
node = slicer.mrmlScene.GetNodeByID(x)
slicer.mrmlScene.RemoveNode(node)
node.SetHideFromEditors(False)
self.colorSliceVolumes = dict()
# Hide manual planes
for planeControls in self.planeControlsDictionary.values():
if planeControls.PlaneIsDefined():
planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, 0)
# Hide planes
for x in self.logic.ColorNodeCorrespondence.keys():
compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
compNode.SetLinkedControl(False)
slice = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
slice.SetWidgetVisible(False)
slice.SetSliceVisible(False)
# Reset layout
if self.autoChangeLayout.isChecked():
lm = slicer.app.layoutManager()
if lm.layout == 4: # the user has not manually changed the layout
lm.setLayout(self.currentLayout)
def removeModelPointLocator(self, name):
if name in self.pointLocatorDictionary:
print("Removing point locator {0}".format(name))
del self.pointLocatorDictionary[name]
def addModelPointLocator(self, name, polydata):
if name not in self.pointLocatorDictionary and name not in self.ignoredNodeNames:
print "Adding point locator: {0}".format(name)
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(polydata)
pointLocator.AutomaticOn()
pointLocator.BuildLocator()
self.pointLocatorDictionary[name] = pointLocator
def addNewPlane(self, keyLoad=-1):
if keyLoad != -1:
self.planeControlsId = keyLoad
else:
self.planeControlsId += 1
if len(self.planeControlsDictionary) >= 1:
self.addPlaneButton.setDisabled(True)
planeControls = AnglePlanesWidgetPlaneControl(self, self.planeControlsId, self.pointLocatorDictionary)
self.managePlanesFormLayout.addRow(planeControls)
key = "Plane " + str(self.planeControlsId)
self.planeControlsDictionary[key] = planeControls
self.updatePlanesComboBoxes()
self.midPointGroupBox.setDisabled(False)
self.selectPlaneForMidPoint.addItem(key)
def RemoveManualPlane(self, id):
key = "Plane " + str(id)
# If the plane has already been removed (for example, removing this plane in this function
# triggers the node-removed callback, which calls this function again),
# we must not do anything the second time this function is called for the same plane.
if key not in self.planeControlsDictionary.keys():
return
fiducialList = slicer.util.getNode('P' + str(id))
planeControls = self.planeControlsDictionary[key]
self.managePlanesFormLayout.removeWidget(planeControls)
self.planeControlsDictionary[key].deleteLater()
self.planeControlsDictionary.pop(key)
self.addPlaneButton.setDisabled(False)
if len(self.planeControlsDictionary.keys()) == 0:
self.midPointGroupBox.setDisabled(True)
self.midPointGroupBox.collapsed = True
self.updatePlanesComboBoxes()
self.valueComboBox()
if self.selectPlaneForMidPoint.findText(key) > -1:
self.selectPlaneForMidPoint.removeItem(self.selectPlaneForMidPoint.findText(key))
if fiducialList:
# fiducialList.SetDisplayVisibility(False)
fiducialList.RemoveObserver(fiducialList.onFiducialAddedObserverTag)
fiducialList.RemoveObserver(fiducialList.onFiducialRemovedObserverTag)
fiducialList.RemoveObserver(fiducialList.setPointModifiedEventObserverTag)
fiducialList.RemoveObserver(fiducialList.onFiducialAddedMidPointObserverTag)
fiducialList.RemoveObserver(fiducialList.onFiducialRemovedMidPointObserverTag)
if planeControls.removeFiducials.checkState() == qt.Qt.Checked:
slicer.app.mrmlScene().RemoveNode(fiducialList)
def onComputeBox(self):
positionOfVisibleNodes = self.getPositionOfModelNodes(True)
if len(positionOfVisibleNodes) == 0:
return
maxValue = slicer.sys.float_info.max
bound = [maxValue, -maxValue, maxValue, -maxValue, maxValue, -maxValue]
for i in positionOfVisibleNodes:
node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
polydata = node.GetPolyData()
if polydata is None or not hasattr(polydata, "GetBounds"):
continue
tempbound = polydata.GetBounds()
bound[0] = min(bound[0], tempbound[0])
bound[2] = min(bound[2], tempbound[2])
bound[4] = min(bound[4], tempbound[4])
bound[1] = max(bound[1], tempbound[1])
bound[3] = max(bound[3], tempbound[3])
bound[5] = max(bound[5], tempbound[5])
# --------------------------- Box around the model --------------------------#
dim = []
origin = []
for x in range(0, 3):
dim.append(bound[x * 2 + 1] - bound[x * 2])
origin.append(bound[x * 2] + dim[x] / 2)
dim[x] *= 1.1
dictColors = {'Red': 32, 'Yellow': 15, 'Green': 1}
for x in dictColors.keys():
sampleVolumeNode = self.CreateNewNode(x, dictColors[x], dim, origin)
compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
compNode.SetLinkedControl(False)
compNode.SetBackgroundVolumeID(sampleVolumeNode.GetID())
print "set background" + x
lm = slicer.app.layoutManager()
#Reset and fit 2D-views
lm.resetSliceViews()
for x in dictColors.keys():
logic = lm.sliceWidget(x)
node = logic.mrmlSliceNode()
node.SetSliceResolutionMode(node.SliceResolutionMatch2DView)
logic.fitSliceToBackground()
#Reset pink box around models
for i in range(0, lm.threeDViewCount):
threeDView = lm.threeDWidget(i).threeDView()
threeDView.resetFocalPoint()
#Reset camera in 3D view to center the models and position the camera so that all actors can be seen
threeDView.renderWindow().GetRenderers().GetFirstRenderer().ResetCamera()
def CreateNewNode(self, colorName, color, dim, origin):
# we add a pseudo-random number to the name of our empty volume to avoid the risk of having a volume called
# exactly the same by the user which could be confusing. We could also have used slicer.app.sessionId()
if colorName not in self.colorSliceVolumes.keys():
VolumeName = "AnglePlanes_EmptyVolume_" + str(slicer.app.applicationPid()) + "_" + colorName
# Do NOT set the spacing and the origin of imageData (vtkImageData)
# The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
# We only create an image of 1 voxel (as we only use it to color the planes)
imageData = vtk.vtkImageData()
imageData.SetDimensions(1, 1, 1)
imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
imageData.SetScalarComponentFromDouble(0, 0, 0, 0, color)
if hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
sampleVolumeNode = slicer.vtkMRMLLabelMapVolumeNode()
else:
sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
sampleVolumeNode.SetName(VolumeName)
labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
colorNode = slicer.util.getNode('GenericAnatomyColors')
labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
sampleVolumeNode.SetAndObserveImageData(imageData)
sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
labelmapVolumeDisplayNode.VisibilityOn()
self.colorSliceVolumes[colorName] = sampleVolumeNode.GetID()
sampleVolumeNode = slicer.mrmlScene.GetNodeByID(self.colorSliceVolumes[colorName])
sampleVolumeNode.SetOrigin(origin[0], origin[1], origin[2])
sampleVolumeNode.SetSpacing(dim[0], dim[1], dim[2])
if not hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
sampleVolumeNode.SetLabelMap(1)
sampleVolumeNode.SetHideFromEditors(True)
sampleVolumeNode.SetSaveWithScene(False)
return sampleVolumeNode
def selectedMiddlePointPlane(self):
if self.selectPlaneForMidPoint.currentText not in self.planeControlsDictionary.keys():
return None
id = self.planeControlsDictionary[self.selectPlaneForMidPoint.currentText].id
markupNodeName = 'P' + str(id)
nodes = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', markupNodeName)
node = nodes.GetItemAsObject(0)
return node
def computeMidPointPosition(self, node, p1ID, p2ID, coord):
f = list()
f.append(type('obj', (object,), {'ID': p1ID, 'coordinates': numpy.zeros(3)}))
f.append(type('obj', (object,), {'ID': p2ID, 'coordinates': numpy.zeros(3)}))
if not node:
return 1
found = 0
for j in (0,1):
fid = node.GetMarkupIndexByID(f[j].ID)
if fid != -1:
current = numpy.zeros(3)
node.GetNthFiducialPosition(fid, current)
f[j].coordinates = current
found += 1
if not found == 2:
print "Error: Fiducials not found in lists"
return 1
current = f[0].coordinates + f[1].coordinates
current /= 2
for i in range(0,3):
coord[i] = current[i]
return 0
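# Tiny worked example (hypothetical coordinates): for two landmarks located at
# (10, 0, 0) and (0, 20, 4), computeMidPointPosition() fills coord with their
# average, i.e. (5.0, 10.0, 2.0).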
def getFiducialIDFromName(self, node, name):
for i in range(0, node.GetNumberOfMarkups()):
if name == node.GetNthFiducialLabel(i):
return node.GetNthMarkupID(i)
return ''
def onAddMidPoint(self):
if self.currentMidPointExists():
print "Mid point already exists"
return
node = self.selectedMiddlePointPlane()
f = list()
f.append(type('obj', (object,), {'name': self.landmarkComboBox1MidPoint.currentText, 'ID': ""}))
f.append(type('obj', (object,), {'name': self.landmarkComboBox2MidPoint.currentText, 'ID': ""}))
for j in (0,1):
f[j].ID = self.getFiducialIDFromName(node, f[j].name)
if '' in [f[0].ID, f[1].ID]:
print "Error: Fiducials not found in lists"
return
coordinates = numpy.zeros(3)
self.computeMidPointPosition(node, f[0].ID, f[1].ID, coordinates)
node.AddFiducial(coordinates[0], coordinates[1], coordinates[2], f[0].name+"-"+f[1].name+"-mid-pt")
newFiducial = node.GetNumberOfMarkups() - 1
node.SetNthFiducialSelected(newFiducial, False)
node.SetNthMarkupLocked(newFiducial, True)
middleFiducial = AnglePlanesMiddleFiducial(f[0].ID, f[1].ID, self.midPointOnSurfaceCheckBox.isChecked(), node.GetID())
self.middleFiducialDictionary[node.GetNthMarkupID(newFiducial)] = middleFiducial
self.onUpdateMidPoint()
def currentMidPointExists(self, remove=False):
for x in self.middleFiducialDictionary.keys():
node = self.selectedMiddlePointPlane()
middleFiducial = self.middleFiducialDictionary[x]
if node.GetID() == middleFiducial.nodeID:
P1 = middleFiducial.P1
P2 = middleFiducial.P2
L1 = self.getFiducialIDFromName(node, self.landmarkComboBox1MidPoint.currentText)
L2 = self.getFiducialIDFromName(node, self.landmarkComboBox2MidPoint.currentText)
if P1 == L1 and P2 == L2 or P1 == L2 and P2 == L1:
if remove is True:
node.RemoveMarkup(node.GetMarkupIndexByID(x))
return False
else:
return True
return False
def getCurrentMidPointFiducialStructure(self):
if self.currentMidPointExists():
for x in self.middleFiducialDictionary.keys():
node = self.selectedMiddlePointPlane()
middleFiducial = self.middleFiducialDictionary[x]
if node.GetID() == middleFiducial.nodeID:
P1 = middleFiducial.P1
P2 = middleFiducial.P2
L1 = self.getFiducialIDFromName(node, self.landmarkComboBox1MidPoint.currentText)
L2 = self.getFiducialIDFromName(node, self.landmarkComboBox2MidPoint.currentText)
if P1 == L1 and P2 == L2 or P1 == L2 and P2 == L1:
return x
return ''
def onRemoveMidPoint(self):
self.onUpdateMidPoint(True)
def onFiducialChangedMidPoint(self, obj, event):
fidlist = obj
node = self.selectedMiddlePointPlane()
if not node or not fidlist == node:
return
self.onChangeMiddlePointFiducialNode()
def fiducialInList(self, name, fidlist):
for i in range(0, fidlist.GetNumberOfFiducials()):
if name == fidlist.GetNthFiducialLabel(i):
return True
return False
def onCloseScene(self, obj, event):
self.middleFiducialDictionary = dict()
self.colorSliceVolumes = dict()
keys = list(self.planeControlsDictionary.keys())
for x in keys:
    self.RemoveManualPlane(x[len('Plane '):])
self.planeControlsDictionary = dict()
# globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
def angleValue(self):
self.valueComboBox()
self.getAngle_RL.setText(self.logic.angle_degre_RL)
self.getAngle_RL_comp.setText(self.logic.angle_degre_RL_comp)
self.getAngle_SI.setText(self.logic.angle_degre_SI)
self.getAngle_SI_comp.setText(self.logic.angle_degre_SI_comp)
self.getAngle_AP.setText(self.logic.angle_degre_AP)
self.getAngle_AP_comp.setText(self.logic.angle_degre_AP_comp)
def setFirstItemInComboBoxNotGivenString(self, comboBox, oldString, noThisString):
if comboBox.findText(oldString) == -1:
allItems = [comboBox.itemText(i) for i in range(comboBox.count)]
for i in allItems:
if i != noThisString:
comboBox.setCurrentIndex(comboBox.findText(i))
break
else:
comboBox.setCurrentIndex(comboBox.findText(oldString))
def updatePlanesComboBoxes(self):
self.planeComboBox1.blockSignals(True)
self.planeComboBox2.blockSignals(True)
colorPlane1 = self.planeComboBox1.currentText
colorPlane2 = self.planeComboBox2.currentText
# Reset Combo boxes
self.fillColorsComboBox(self.planeComboBox1)
self.fillColorsComboBox(self.planeComboBox2)
self.planeComboBox2.removeItem(self.planeComboBox2.findText(colorPlane1))
self.planeComboBox1.removeItem(self.planeComboBox1.findText(colorPlane2))
self.setFirstItemInComboBoxNotGivenString(self.planeComboBox1, colorPlane1, colorPlane2)
self.setFirstItemInComboBoxNotGivenString(self.planeComboBox2, colorPlane2, colorPlane1)
self.planeComboBox1.blockSignals(False)
self.planeComboBox2.blockSignals(False)
def valueComboBox(self):
self.updatePlanesComboBoxes()
# Hide everything before showing what is necessary
for x in self.logic.ColorNodeCorrespondence.keys():
compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
compNode.SetLinkedControl(False)
slice = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
slice.SetWidgetVisible(False)
slice.SetSliceVisible(False)
colorPlane1 = self.planeComboBox1.currentText
colorPlane2 = self.planeComboBox2.currentText
self.defineAngle(colorPlane1, colorPlane2)
def modify(self, obj, event):
self.defineAngle(self.planeComboBox1.currentText, self.planeComboBox2.currentText)
def defineAngle(self, colorPlane1, colorPlane2):
print "DEFINE ANGLE"
print colorPlane1
if colorPlane1 in self.logic.ColorNodeCorrespondence:
slice1 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane1])
self.logic.getMatrix(slice1)
slice1.SetWidgetVisible(True)
slice1.SetSliceVisible(True)
matrix1 = self.logic.getMatrix(slice1)
normal1 = self.logic.defineNormal(matrix1)
else:
normal1 = self.planeControlsDictionary[colorPlane1].logic.N
print(colorPlane2)
if colorPlane2 in self.logic.ColorNodeCorrespondence:
slice2 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane2])
self.logic.getMatrix(slice2)
slice2.SetWidgetVisible(True)
slice2.SetSliceVisible(True)
matrix2 = self.logic.getMatrix(slice2)
normal2 = self.logic.defineNormal(matrix2)
else:
normal2 = self.planeControlsDictionary[colorPlane2].logic.N
self.logic.getAngle(normal1, normal2)
def onSavePlanes(self):
self.savePlanes()
def savePlanes(self, filename=None):
tempDictionary = {}
sliceRed = slicer.util.getNode(self.logic.ColorNodeCorrespondence['Red'])
tempDictionary["Red"] = self.logic.getMatrix(sliceRed).tolist()
sliceYellow = slicer.util.getNode(self.logic.ColorNodeCorrespondence['Yellow'])
tempDictionary["Yellow"] = self.logic.getMatrix(sliceYellow).tolist()
sliceGreen = slicer.util.getNode(self.logic.ColorNodeCorrespondence['Green'])
tempDictionary["Green"] = self.logic.getMatrix(sliceGreen).tolist()
tempDictionary["customPlanes"] = {}
for key, plane in self.planeControlsDictionary.items():
tempDictionary["customPlanes"][plane.id] = plane.getFiducials()
print(filename)
if filename is None:
filename = qt.QFileDialog.getSaveFileName(parent=self, caption='Save file')
if filename != "":
fileObj = open(filename, "wb")
pickle.dump(tempDictionary, fileObj)
fileObj.close()
def onReadPlanes(self):
self.readPlanes()
def readPlanes(self, filename=None):
if filename is None:
filename = qt.QFileDialog.getOpenFileName(parent=self, caption='Open file')
if filename != "":
fileObj = open(filename, "rb")
tempDictionary = pickle.load(fileObj)
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeRed')
matList = tempDictionary["Red"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeYellow')
matList = tempDictionary["Yellow"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeGreen')
matList = tempDictionary["Green"]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
customPlanes = tempDictionary["customPlanes"]
for key, fidlist in customPlanes.items():
self.addNewPlane(key)
tempkey = "Plane " + str(self.planeControlsId)
currentFidList = self.planeControlsDictionary[tempkey].logic.getFiducialList()
for i in range(0, len(fidlist)):
f = fidlist[i]
currentFidList.AddFiducial(f[0], f[1], f[2])
fileObj.close()
# This widget controls each of the planes that are added to the interface.
# The widget contains its own logic, i.e. an object of AnglePlanesLogic.
# Each plane contains a separate fiducial list. The planes are named P1, P2, ..., PN. The landmarks are named
# P1-1, P1-2, ..., P1-N.
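# For example, with a hypothetical plane id of 2: its landmarks live in a
# markups fiducial node named "P2", the landmarks are labelled "P2-1", "P2-2", ...
# and the plane appears in the interface as "Plane 2".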
class AnglePlanesWidgetPlaneControl(qt.QFrame):
def __init__(self, anglePlanes, id, pointlocatordictionary):
qt.QFrame.__init__(self)
self.id = id
self.setLayout(qt.QFormLayout())
self.pointLocatorDictionary = pointlocatordictionary
self.logic = AnglePlanesLogic(id)
landmarkLayout = qt.QVBoxLayout()
planeLabelLayout = qt.QHBoxLayout()
planeLabel = qt.QLabel('Plane ' + str(id) + ":")
planeLabelLayout.addWidget(planeLabel)
planeLabelLayout.addStretch()
addFiducialLabel = qt.QLabel('Add')
addFiducialButton = qt.QPushButton(qt.QIcon(":/Icons/AnnotationPointWithArrow.png"), " ")
addFiducialButton.setFixedSize(50, 25)
addFiducialButton.connect('clicked()', self.addLandMarkClicked)
addFiducialButton.setEnabled(True)
planeLabelLayout.addWidget(addFiducialLabel)
planeLabelLayout.addWidget(addFiducialButton)
numberOfNodes = len(anglePlanes.getPositionOfModelNodes(True))
self.surfaceDeplacementCheckBox = qt.QCheckBox("On Surface")
if numberOfNodes > 0:
self.surfaceDeplacementCheckBox.setChecked(True)
else:
self.surfaceDeplacementCheckBox.setDisabled(True)
self.surfaceDeplacementCheckBox.connect('stateChanged(int)', self.onSurfaceDeplacementStateChanged)
planeLabelLayout.addWidget(self.surfaceDeplacementCheckBox)
landmarkLayout.addLayout(planeLabelLayout)
label1Layout = qt.QHBoxLayout()
label1 = qt.QLabel(' L1:')
self.landmark1ComboBox = qt.QComboBox()
landmark1ComboBox = self.landmark1ComboBox
landmark1ComboBox.addItem("Select")
landmark1ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label1Layout.addWidget(label1)
label1Layout.addWidget(landmark1ComboBox)
landmarkLayout.addLayout(label1Layout)
label2Layout = qt.QHBoxLayout()
label2 = qt.QLabel(' L2:')
self.landmark2ComboBox = qt.QComboBox()
landmark2ComboBox = self.landmark2ComboBox
landmark2ComboBox.addItem("Select")
landmark2ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label2Layout.addWidget(label2)
label2Layout.addWidget(landmark2ComboBox)
landmarkLayout.addLayout(label2Layout)
label3Layout = qt.QHBoxLayout()
label3 = qt.QLabel(' L3:')
self.landmark3ComboBox = qt.QComboBox()
landmark3ComboBox = self.landmark3ComboBox
landmark3ComboBox.addItem("Select")
landmark3ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label3Layout.addWidget(label3)
label3Layout.addWidget(landmark3ComboBox)
landmarkLayout.addLayout(label3Layout)
# fiducial list for the plane
fidNode = self.logic.getFiducialList()
for i in range(0, fidNode.GetNumberOfFiducials()):
label = fidNode.GetNthFiducialLabel(i)
landmark1ComboBox.addItem(label)
landmark2ComboBox.addItem(label)
landmark3ComboBox.addItem(label)
anglePlanes.landmarkComboBox1MidPoint.addItem(label)
anglePlanes.landmarkComboBox2MidPoint.addItem(label)
#anglePlanes.midPointFiducialDictionaryID[label] = fidNode.GetNthMarkupID(i)
fidNode.onFiducialAddedObserverTag = fidNode.AddObserver(fidNode.MarkupAddedEvent, self.onFiducialAdded)
fidNode.onFiducialRemovedObserverTag = fidNode.AddObserver(fidNode.MarkupRemovedEvent, self.onFiducialRemoved)
fidNode.setPointModifiedEventObserverTag = fidNode.AddObserver(fidNode.PointModifiedEvent,
self.onPointModifiedEvent)
# These observers call back into AnglePlanesWidget (anglePlanes); they listen
# for any fiducial being added or removed.
fidNode.onFiducialAddedMidPointObserverTag = fidNode.AddObserver(fidNode.MarkupAddedEvent,
anglePlanes.onFiducialChangedMidPoint)
fidNode.onFiducialRemovedMidPointObserverTag = fidNode.AddObserver(fidNode.MarkupRemovedEvent,
anglePlanes.onFiducialChangedMidPoint)
self.layout().addRow(landmarkLayout)
self.slider = ctk.ctkSliderWidget()
slider = self.slider
slider.singleStep = 0.1
slider.minimum = 0.1
slider.maximum = 10
slider.value = 1.0
slider.toolTip = "Set the size of your plane."
self.slideOpacity = ctk.ctkSliderWidget()
slideOpacity = self.slideOpacity
slideOpacity.singleStep = 0.1
slideOpacity.minimum = 0.1
slideOpacity.maximum = 1
slideOpacity.value = 1.0
slideOpacity.toolTip = "Set the opacity of your plane."
slider.connect('valueChanged(double)', self.placePlaneClicked)
slideOpacity.connect('valueChanged(double)', self.placePlaneClicked)
landmarkSliderLayout = qt.QHBoxLayout()
label = qt.QLabel(' Size:')
label2 = qt.QLabel(' Opacity:')
landmarkSliderLayout.addWidget(label)
landmarkSliderLayout.addWidget(self.slider)
landmarkSliderLayout.addWidget(label2)
landmarkSliderLayout.addWidget(self.slideOpacity)
self.HidePlaneCheckBox = qt.QCheckBox("Hide")
self.HidePlaneCheckBox.setChecked(False)
self.HidePlaneCheckBox.connect('stateChanged(int)', self.onHideSurface)
landmarkSliderLayout.addWidget(self.HidePlaneCheckBox)
self.layout().addRow(landmarkSliderLayout)
removeButtonLayout = qt.QHBoxLayout()
removeButtonLayout.addStretch(1)
removePlaneButton = qt.QPushButton("Remove")
removeButtonLayout.addWidget(removePlaneButton)
self.removeFiducials = qt.QCheckBox("Remove Fiducials")
self.removeFiducials.setChecked(True)
removeButtonLayout.addWidget(self.removeFiducials)
self.layout().addRow(removeButtonLayout)
removePlaneButton.connect('clicked(bool)', self.onRemove)
self.anglePlanes = anglePlanes
def PlaneIsDefined(self):
if self.landmark1ComboBox.currentIndex > 0 and self.landmark2ComboBox.currentIndex > 0 and self.landmark3ComboBox.currentIndex > 0:
return True
else:
return False
def onRemove(self):
self.logic.remove()
self.anglePlanes.RemoveManualPlane(self.id)
def onFiducialRemoved(self, obj, event):
fidlist = obj
# Update combo boxes
for i in range(1, self.landmark1ComboBox.count):
found = self.fiducialInList(self.landmark1ComboBox.itemText(i), fidlist)
if not found:
self.landmark1ComboBox.removeItem(i)
self.landmark2ComboBox.removeItem(i)
self.landmark3ComboBox.removeItem(i)
break
# Update middle point dictionary
# Check that the fiducial that was removed was not a middle fiducial
for x in self.anglePlanes.middleFiducialDictionary.keys():
node = slicer.mrmlScene.GetNodeByID(self.anglePlanes.middleFiducialDictionary[x].nodeID)
if node == fidlist:
if node.GetMarkupIndexByID(x) == -1:
print "removing fiducial from middlefiducialDictionary"
del self.anglePlanes.middleFiducialDictionary[x]
# continue
# If the fiducial that is removed is one of the two fiducials defining a middle point,
# we also remove the middle point.
# If this loop removes a markup, it might start an asynchronous job that modifies
# the dictionary while we iterate, which would be an issue.
middleFiducialDictionary = copy.deepcopy(self.anglePlanes.middleFiducialDictionary)
for x in middleFiducialDictionary.keys():
node = slicer.mrmlScene.GetNodeByID(middleFiducialDictionary[x].nodeID)
p1 = middleFiducialDictionary[x].P1
p2 = middleFiducialDictionary[x].P2
if node.GetMarkupIndexByID(p1) == -1 or node.GetMarkupIndexByID(p2) == -1:
position = node.GetMarkupIndexByID(x)
if position != -1:
print "removing middle fiducial because end point has been removed"
node.RemoveMarkup(position)
# No need to remove it from middleFiducialDictionary here as the previous
# call should trigger the call of this function and remove this markup
# from middleFiducialDictionary for us
def getFiducials(self):
fidNode = self.logic.getFiducialList()
listCoord = list()
coord = numpy.zeros(3)
fidNode.GetNthFiducialPosition(int(self.landmark1ComboBox.currentIndex) - 1, coord)
listCoord.append(coord)
fidNode.GetNthFiducialPosition(int(self.landmark2ComboBox.currentIndex) - 1, coord)
listCoord.append(coord)
fidNode.GetNthFiducialPosition(int(self.landmark3ComboBox.currentIndex) - 1, coord)
listCoord.append(coord)
return listCoord
def placePlaneClicked(self):
self.anglePlanes.valueComboBox()
self.update()
def fiducialInList(self, name, fidlist):
for i in range(0, fidlist.GetNumberOfFiducials()):
if name == fidlist.GetNthFiducialLabel(i):
return True
return False
def projectAllFiducials(self):
fidlist = self.logic.getFiducialList()
for i in range(0, fidlist.GetNumberOfFiducials()):
fidid = fidlist.GetNthMarkupID(i)
isMiddlePoint = fidid in self.anglePlanes.middleFiducialDictionary.keys()
if not isMiddlePoint:
self.projectFiducialOnClosestSurface(fidlist, i, self.pointLocatorDictionary)
def UpdateMiddlePointsPositions(self):
current = numpy.zeros(3)
for x in self.anglePlanes.middleFiducialDictionary.keys():
middleFiducial = self.anglePlanes.middleFiducialDictionary[x]
if middleFiducial.nodeID == self.logic.getFiducialList().GetID():
node = slicer.mrmlScene.GetNodeByID(middleFiducial.nodeID)
self.anglePlanes.computeMidPointPosition(node, middleFiducial.P1, middleFiducial.P2, current)
node.RemoveObserver(node.setPointModifiedEventObserverTag)
index = node.GetMarkupIndexByID(x)
node.SetNthFiducialPosition(index, current[0], current[1], current[2])
node.setPointModifiedEventObserverTag = node.AddObserver(node.PointModifiedEvent,
self.onPointModifiedEvent)
if middleFiducial.onSurface:
print "middle on surface"
self.projectFiducialOnClosestSurface(node, index, self.pointLocatorDictionary)
def onPointModifiedEvent(self, obj, event):
if self.surfaceDeplacementCheckBox.isChecked():
self.projectAllFiducials()
self.update()
def onSurfaceDeplacementStateChanged(self):
if self.surfaceDeplacementCheckBox.isChecked():
self.projectAllFiducials()
self.update()
def onHideSurface(self):
if self.PlaneIsDefined():
if self.HidePlaneCheckBox.isChecked():
self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex, self.landmark2ComboBox.currentIndex,
self.landmark3ComboBox.currentIndex, self.slider.value, 0)
else:
self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex, self.landmark2ComboBox.currentIndex,
self.landmark3ComboBox.currentIndex, self.slider.value,
self.slideOpacity.value)
def update(self):
self.UpdateMiddlePointsPositions()
if self.PlaneIsDefined():
self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex, self.landmark2ComboBox.currentIndex,
self.landmark3ComboBox.currentIndex, self.slider.value, self.slideOpacity.value)
def projectFiducialOnClosestSurface(self, fidlist, fidid, pointLocatorDictionary):
landmarkCoord = numpy.zeros(3)
fidlist.GetNthFiducialPosition(fidid, landmarkCoord)
minDistance = slicer.sys.float_info.max
minClosestPoint = numpy.zeros(3)
# print "landmark: " + str(landmarkCoord) + ", fidid: " + str(fidid)
keys = pointLocatorDictionary.keys()
foundCloser = False
for i in range(0, len(keys)):
locator = pointLocatorDictionary[keys[i]]
closestpointid = locator.FindClosestPoint(landmarkCoord)
mrmlmodelcollection = slicer.mrmlScene.GetNodesByClassByName("vtkMRMLModelNode", keys[i])
modelnode = mrmlmodelcollection.GetItemAsObject(0)
if not modelnode:
continue
poly = modelnode.GetPolyData()
if poly is None or not hasattr(poly, 'GetPoints'): # It will be equal to None if object does not contain a polydata
continue
closestpoint = poly.GetPoints().GetPoint(closestpointid)
#print "closestpointid:" + str(closestpointid) + ", point: " + str(closestpoint)
distance = numpy.linalg.norm(closestpoint - landmarkCoord)
#print "distance: " + str(distance)
if distance < minDistance:
foundCloser = True
minDistance = distance
minClosestPoint = closestpoint
if foundCloser:
if minClosestPoint[0] != landmarkCoord[0] or minClosestPoint[1] != landmarkCoord[1] or minClosestPoint[2] != \
landmarkCoord[2]:
fidlist.RemoveObserver(fidlist.setPointModifiedEventObserverTag)
fidlist.SetNthFiducialPosition(fidid, minClosestPoint[0], minClosestPoint[1], minClosestPoint[2])
fidlist.setPointModifiedEventObserverTag = fidlist.AddObserver(fidlist.PointModifiedEvent,
self.onPointModifiedEvent)
def addLandMarkClicked(self):
# print "Add landmarks"
# # Place landmarks in the 3D scene
fidlist = self.logic.getFiducialList()
selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
selectionNode.SetActivePlaceNodeID(fidlist.GetID())
# print selectionNode
interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
interactionNode.SetCurrentInteractionMode(1)
# To select multiple points in the 3D view, we want to have to click
# on the "place fiducial" button multiple times
placeModePersistence = 0
interactionNode.SetPlaceModePersistence(placeModePersistence)
def onFiducialAdded(self, obj, event):
fidlist = obj
label = fidlist.GetNthFiducialLabel(fidlist.GetNumberOfFiducials() - 1)
self.landmark1ComboBox.addItem(label)
self.landmark2ComboBox.addItem(label)
self.landmark3ComboBox.addItem(label)
class AnglePlanesLogic(ScriptedLoadableModuleLogic):
def __init__(self, id=-1):
self.ColorNodeCorrespondence = {'Red': 'vtkMRMLSliceNodeRed',
'Yellow': 'vtkMRMLSliceNodeYellow',
'Green': 'vtkMRMLSliceNodeGreen'}
self.id = id
self.initialize()
def initialize(self):
self.polydata = vtk.vtkPolyData()
self.points = vtk.vtkPoints()
self.planeSource = vtk.vtkPlaneSource()
self.mapper = vtk.vtkPolyDataMapper()
self.actor = vtk.vtkActor()
def remove(self):
renderer = list()
renderWindow = list()
layoutManager = slicer.app.layoutManager()
for i in range(0, layoutManager.threeDViewCount):
threeDWidget = layoutManager.threeDWidget(i)
threeDView = threeDWidget.threeDView()
renderWindow.append(threeDView.renderWindow())
renderers = renderWindow[i].GetRenderers()
renderer.append(renderers.GetFirstRenderer())
renderer[i].RemoveViewProp(self.actor)
renderWindow[i].AddRenderer(renderer[i])
renderer[i].Render()
self.actor.RemoveAllObservers()
self.actor = None
def getFiducialList(self):
P = self.getFiducialListName()
nodes = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', P)
if nodes.GetNumberOfItems() == 0:
# The list does not exist so we create it
fidNode = slicer.vtkMRMLMarkupsFiducialNode()
fidNode.SetName(P)
slicer.mrmlScene.AddNode(fidNode)
else:
# The list exists but the observers must be updated
fidNode = nodes.GetItemAsObject(0)
return fidNode
def getFiducialListName(self):
return "P" + str(self.id)
def getMatrix(self, slice):
self.mat = slice.GetSliceToRAS()
print self.mat
# ---------------------- RED SLICE -----------------------#
# Matrix with the elements of SliceToRAS
m = numpy.matrix([[self.mat.GetElement(0, 0), self.mat.GetElement(0, 1), self.mat.GetElement(0, 2),
self.mat.GetElement(0, 3)],
[self.mat.GetElement(1, 0), self.mat.GetElement(1, 1), self.mat.GetElement(1, 2),
self.mat.GetElement(1, 3)],
[self.mat.GetElement(2, 0), self.mat.GetElement(2, 1), self.mat.GetElement(2, 2),
self.mat.GetElement(2, 3)],
[self.mat.GetElement(3, 0), self.mat.GetElement(3, 1), self.mat.GetElement(3, 2),
self.mat.GetElement(3, 3)]])
return m
def defineNormal(self, matrix):
# Normal vector to the Red slice:
n_vector = numpy.matrix([[0], [0], [1], [1]])
# point on the Red slice:
A = numpy.matrix([[0], [0], [0], [1]])
normalVector = matrix * n_vector
# print "n : \n", normalVector
A = matrix * A
normalVector1 = normalVector
normalVector1[0] = normalVector[0] - A[0]
normalVector1[1] = normalVector[1] - A[1]
normalVector1[2] = normalVector[2] - A[2]
#print normalVector1
return normalVector1
def getAngle(self, normalVect1, normalVect2):
norm1 = sqrt(
normalVect1[0] * normalVect1[0] + normalVect1[1] * normalVect1[1] + normalVect1[2] * normalVect1[2])
# print "norme 1: \n", norm1
norm2 = sqrt(
normalVect2[0] * normalVect2[0] + normalVect2[1] * normalVect2[1] + normalVect2[2] * normalVect2[2])
#print "norme 2: \n", norm2
scalar_product = (
normalVect1[0] * normalVect2[0] + normalVect1[1] * normalVect2[1] + normalVect1[2] * normalVect2[2])
#print "scalar product : \n", scalar_product
angle = acos(scalar_product / (norm1 * norm2))
#print "radian angle : ", angle
angle_degree = angle * 180 / pi
#print "Angle in degree", angle_degree
norm1_RL = sqrt(normalVect1[1] * normalVect1[1] + normalVect1[2] * normalVect1[2])
#print "norme RL: \n", norm1_RL
norm2_RL = sqrt(normalVect2[1] * normalVect2[1] + normalVect2[2] * normalVect2[2])
#print "norme RL: \n", norm2_RL
        if (norm1_RL == 0 or norm2_RL == 0):
self.angle_degre_RL = 0
self.angle_degre_RL_comp = 0
else:
scalar_product_RL = (normalVect1[1] * normalVect2[1] + normalVect1[2] * normalVect2[2])
#print "scalar product : \n", scalar_product_RL
angleRL = acos(scalar_product_RL / (norm1_RL * norm2_RL))
#print "radian angle : ", angleRL
self.angle_degre_RL = angleRL * 180 / pi
self.angle_degre_RL = round(self.angle_degre_RL, 2)
#print self.angle_degre_RL
self.angle_degre_RL_comp = 180 - self.angle_degre_RL
norm1_SI = sqrt(normalVect1[0] * normalVect1[0] + normalVect1[1] * normalVect1[1])
#print "norme1_SI : \n", norm1_SI
norm2_SI = sqrt(normalVect2[0] * normalVect2[0] + normalVect2[1] * normalVect2[1])
#print "norme2_SI : \n", norm2_SI
if (norm1_SI == 0 or norm2_SI == 0):
self.angle_degre_SI = 0
self.angle_degre_SI_comp = 0
else:
scalar_product_SI = (normalVect1[0] * normalVect2[0] + normalVect1[1] * normalVect2[1])
#print "scalar product_SI : \n", scalar_product_SI
angleSI = acos(scalar_product_SI / (norm1_SI * norm2_SI))
#print "radian angle : ", angleSI
self.angle_degre_SI = angleSI * 180 / pi
self.angle_degre_SI = round(self.angle_degre_SI, 2)
#print self.angle_degre_SI
self.angle_degre_SI_comp = 180 - self.angle_degre_SI
#print self.angle_degre_SI_comp
norm1_AP = sqrt(normalVect1[0] * normalVect1[0] + normalVect1[2] * normalVect1[2])
#print "norme1_SI : \n", norm1_AP
norm2_AP = sqrt(normalVect2[0] * normalVect2[0] + normalVect2[2] * normalVect2[2])
#print "norme2_SI : \n", norm2_AP
if (norm1_AP == 0 or norm2_AP == 0):
self.angle_degre_AP = 0
self.angle_degre_AP_comp = 0
else:
scalar_product_AP = (normalVect1[0] * normalVect2[0] + normalVect1[2] * normalVect2[2])
#print "scalar product_SI : \n", scalar_product_AP
#print "VALUE :", scalar_product_AP/(norm1_AP*norm2_AP)
angleAP = acos(scalar_product_AP / (norm1_AP * norm2_AP))
#print "radian angle : ", angleAP
self.angle_degre_AP = angleAP * 180 / pi
self.angle_degre_AP = round(self.angle_degre_AP, 2)
#print self.angle_degre_AP
self.angle_degre_AP_comp = 180 - self.angle_degre_AP
def normalLandmarks(self, GA, GB):
Vn = numpy.matrix([[0], [0], [0]])
Vn[0] = GA[1] * GB[2] - GA[2] * GB[1]
Vn[1] = GA[2] * GB[0] - GA[0] * GB[2]
Vn[2] = GA[0] * GB[1] - GA[1] * GB[0]
# print "Vn = ",Vn
norm_Vn = sqrt(Vn[0] * Vn[0] + Vn[1] * Vn[1] + Vn[2] * Vn[2])
Normal = Vn / norm_Vn
#print "N = ",Normal
return Normal
def planeLandmarks(self, Landmark1Value, Landmark2Value, Landmark3Value, slider, sliderOpacity):
        # A plane is defined by exactly three landmarks; keep the coordinates of the first
fidNode = self.getFiducialList()
r1 = 0
a1 = 0
s1 = 0
coord = numpy.zeros(3)
if Landmark1Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark1Value) - 1, coord)
r1 = coord[0]
a1 = coord[1]
s1 = coord[2]
        # Keep the coordinates of the second landmark
r2 = 0
a2 = 0
s2 = 0
if Landmark2Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark2Value) - 1, coord)
r2 = coord[0]
a2 = coord[1]
s2 = coord[2]
        # Keep the coordinates of the third landmark
r3 = 0
a3 = 0
s3 = 0
if Landmark3Value != 0:
fidNode.GetNthFiducialPosition(int(Landmark3Value) - 1, coord)
r3 = coord[0]
a3 = coord[1]
s3 = coord[2]
points = self.points
if points.GetNumberOfPoints() == 0:
points.InsertNextPoint(r1, a1, s1)
points.InsertNextPoint(r2, a2, s2)
points.InsertNextPoint(r3, a3, s3)
else:
points.SetPoint(0, r1, a1, s1)
points.SetPoint(1, r2, a2, s2)
points.SetPoint(2, r3, a3, s3)
polydata = self.polydata
polydata.SetPoints(points)
centerOfMass = vtk.vtkCenterOfMass()
centerOfMass.SetInputData(polydata)
centerOfMass.SetUseScalarsAsWeights(False)
centerOfMass.Update()
G = centerOfMass.GetCenter()
# print "Center of mass = ",G
A = (r1, a1, s1)
B = (r2, a2, s2)
C = (r3, a3, s3)
# Vector GA
GA = numpy.matrix([[0], [0], [0]])
GA[0] = A[0] - G[0]
GA[1] = A[1] - G[1]
GA[2] = A[2] - G[2]
#print "GA = ", GA
        # Vector GB
GB = numpy.matrix([[0], [0], [0]])
GB[0] = B[0] - G[0]
GB[1] = B[1] - G[1]
GB[2] = B[2] - G[2]
#print "GB = ", GB
        # Vector GC
GC = numpy.matrix([[0], [0], [0]])
GC[0] = C[0] - G[0]
GC[1] = C[1] - G[1]
GC[2] = C[2] - G[2]
#print "GC = ", GC
self.N = self.normalLandmarks(GA, GB)
D = numpy.matrix([[0], [0], [0]])
E = numpy.matrix([[0], [0], [0]])
F = numpy.matrix([[0], [0], [0]])
D[0] = slider * GA[0] + G[0]
D[1] = slider * GA[1] + G[1]
D[2] = slider * GA[2] + G[2]
#print "Slider value : ", slider
#print "D = ",D
E[0] = slider * GB[0] + G[0]
E[1] = slider * GB[1] + G[1]
E[2] = slider * GB[2] + G[2]
#print "E = ",E
F[0] = slider * GC[0] + G[0]
F[1] = slider * GC[1] + G[1]
F[2] = slider * GC[2] + G[2]
#print "F = ",F
planeSource = self.planeSource
planeSource.SetNormal(self.N[0], self.N[1], self.N[2])
planeSource.SetOrigin(D[0], D[1], D[2])
planeSource.SetPoint1(E[0], E[1], E[2])
planeSource.SetPoint2(F[0], F[1], F[2])
planeSource.Update()
plane = planeSource.GetOutput()
mapper = self.mapper
mapper.SetInputData(plane)
mapper.Update()
self.actor.SetMapper(mapper)
self.actor.GetProperty().SetColor(0, 0.4, 0.8)
self.actor.GetProperty().SetOpacity(sliderOpacity)
renderer = list()
renderWindow = list()
layoutManager = slicer.app.layoutManager()
for i in range(0, layoutManager.threeDViewCount):
threeDWidget = layoutManager.threeDWidget(i)
threeDView = threeDWidget.threeDView()
renderWindow.append(threeDView.renderWindow())
renderers = renderWindow[i].GetRenderers()
renderer.append(renderers.GetFirstRenderer())
renderer[i].AddViewProp(self.actor)
renderWindow[i].AddRenderer(renderer[i])
renderer[i].Render()
renderWindow[i].Render()
class AnglePlanesTest(ScriptedLoadableModuleTest):
def setUp(self):
# reset the state - clear scene
slicer.mrmlScene.Clear(0)
def runTest(self):
# run all tests needed
self.setUp()
self.test_AnglePlanes()
def test_AnglePlanes(self):
self.delayDisplay('Starting the test')
self.delayDisplay('Adding planes')
widget = AnglePlanesWidget()
widget.addNewPlane()
widget.addNewPlane()
self.delayDisplay('Adding fiducials')
fidlist1 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P1").GetItemAsObject(0)
fidlist1.AddFiducial(10, 10, 10)
fidlist1.AddFiducial(20, 20, 20)
fidlist1.AddFiducial(10, 20, 30)
fidlist2 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P2").GetItemAsObject(0)
fidlist2.AddFiducial(50, 50, 50)
fidlist2.AddFiducial(40, 20, 80)
fidlist2.AddFiducial(10, 40, 20)
self.delayDisplay('Saving planes')
widget.savePlanes("test.p")
self.delayDisplay('Loading planes')
widget.readPlanes("test.p")
self.delayDisplay('Selecting fiducials')
widget.planeControlsDictionary["Plane 1"].landmark1ComboBox.setCurrentIndex(1)
widget.planeControlsDictionary["Plane 1"].landmark2ComboBox.setCurrentIndex(2)
widget.planeControlsDictionary["Plane 1"].landmark3ComboBox.setCurrentIndex(3)
widget.planeControlsDictionary["Plane 2"].landmark1ComboBox.setCurrentIndex(1)
widget.planeControlsDictionary["Plane 2"].landmark2ComboBox.setCurrentIndex(2)
widget.planeControlsDictionary["Plane 2"].landmark3ComboBox.setCurrentIndex(3)
self.delayDisplay('Selecting planes')
widget.planeComboBox1.setCurrentIndex(5)
widget.planeComboBox2.setCurrentIndex(6)
self.delayDisplay('Calculating angle')
widget.angleValue()
        test = (widget.logic.angle_degre_RL != 59.06 or widget.logic.angle_degre_RL_comp != 120.94 or
                widget.logic.angle_degre_SI != 12.53 or widget.logic.angle_degre_SI_comp != 167.47 or
                widget.logic.angle_degre_AP != 82.56 or widget.logic.angle_degre_AP_comp != 97.44)
self.delayDisplay('Testing angles')
if test:
print "", "Angle", "Complementary"
print "R-L-View", self.logic.angle_degre_RL, self.logic.angle_degre_RL_comp
print "S-I-View", self.logic.angle_degre_SI, self.logic.angle_degre_SI_comp
print "A-P-View", self.logic.angle_degre_AP, self.logic.angle_degre_AP_comp
self.delayDisplay('Test Failure!')
else:
self.delayDisplay('Test passed!')
widget.parent.close()
|
fbudin69500/AnglePlanes-Extension
|
AnglePlanes/AnglePlanes.py
|
Python
|
apache-2.0
| 69,029
|
[
"VTK"
] |
2e5aee6ea155507ee7e9df53b1e32b7e42c258bdc12fa10a5bb1ee71a1c66f99
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for StarOffice and OpenOffice."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.braille as braille
import orca.braille_generator as braille_generator
import orca.object_properties as object_properties
import orca.settings_manager as settings_manager
_settingsManager = settings_manager.getManager()
class BrailleGenerator(braille_generator.BrailleGenerator):
# pylint: disable-msg=W0142
def __init__(self, script):
super().__init__(script)
def _generateRoleName(self, obj, **args):
if self._script.utilities.isDocument(obj):
return []
if self._script.utilities.isFocusableLabel(obj):
return []
return super()._generateRoleName(obj, **args)
def _generateRowHeader(self, obj, **args):
"""Returns an array of strings that represent the row header for an
object that is in a table, if it exists. Otherwise, an empty
array is returned. Overridden here so that we can get the
dynamic row header(s).
"""
newOnly = args.get('newOnly', False)
rowHeader, columnHeader = \
self._script.utilities.getDynamicHeadersForCell(obj, newOnly)
if not rowHeader:
return []
text = self._script.utilities.displayedText(rowHeader)
if text:
return [text]
return []
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings that represent the column header for an
object that is in a table, if it exists. Otherwise, an empty
array is returned. Overridden here so that we can get the
dynamic column header(s).
"""
newOnly = args.get('newOnly', False)
rowHeader, columnHeader = \
self._script.utilities.getDynamicHeadersForCell(obj, newOnly)
if not columnHeader:
return []
text = self._script.utilities.displayedText(columnHeader)
if text:
return [text]
return []
def _generateRealTableCell(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRealTableCell(obj, **args)
if not obj.childCount:
result = super()._generateRealTableCell(obj, **args)
else:
result = []
formatType = args.get('formatType')
args['formatType'] = 'focused'
for child in obj:
result.extend(self.generate(child, **args))
args['formatType'] = formatType
if not self._script.utilities.isSpreadSheetCell(obj):
return result
try:
objectText = self._script.utilities.substring(obj, 0, -1)
cellName = self._script.utilities.spreadSheetCellName(obj)
except:
return []
return [braille.Component(obj, " ".join((objectText, cellName)))]
def _generateTableCellDelimiter(self, obj, **args):
return braille.Region(object_properties.TABLE_CELL_DELIMITER_BRAILLE)
def _generateTableCellRow(self, obj, **args):
if not self._script.utilities.shouldReadFullRow(obj):
return self._generateRealTableCell(obj, **args)
if not self._script.utilities.isSpreadSheetCell(obj):
return super()._generateTableCellRow(obj, **args)
cells = self._script.utilities.getShowingCellsInSameRow(obj)
if not cells:
return []
result = []
for cell in cells:
cellResult = self._generateRealTableCell(cell, **args)
if cellResult and result:
result.append(self._generateTableCellDelimiter(obj, **args))
result.extend(cellResult)
return result
def _generateChildTab(self, obj, **args):
"""If we are in the slide presentation scroll pane, also announce the
current page tab. See bug #538056 for more details.
"""
result = []
rolesList = [pyatspi.ROLE_SCROLL_PANE, \
pyatspi.ROLE_PANEL, \
pyatspi.ROLE_PANEL, \
pyatspi.ROLE_ROOT_PANE, \
pyatspi.ROLE_FRAME, \
pyatspi.ROLE_APPLICATION]
if self._script.utilities.hasMatchingHierarchy(obj, rolesList):
for child in obj.parent:
if child.getRole() == pyatspi.ROLE_PAGE_TAB_LIST:
for tab in child:
eventState = tab.getState()
if eventState.contains(pyatspi.STATE_SELECTED):
args['role'] = tab.getRole()
result.extend(self.generate(tab, **args))
return result
def _generateAncestors(self, obj, **args):
if self._script._lastCommandWasStructNav:
return []
return super()._generateAncestors(obj, **args)
def _generateIncludeContext(self, obj, **args):
if self._script._lastCommandWasStructNav:
return False
return super()._generateIncludeContext(obj, **args)
def generateBraille(self, obj, **args):
args['useDefaultFormatting'] = self._script.utilities.isNonFocusableList(obj)
oldRole = self._overrideRole(self._getAlternativeRole(obj, **args), args)
result = super().generateBraille(obj, **args)
del args['useDefaultFormatting']
self._restoreRole(oldRole, args)
return result
|
GNOME/orca
|
src/orca/scripts/apps/soffice/braille_generator.py
|
Python
|
lgpl-2.1
| 6,388
|
[
"ORCA"
] |
a2343789c745cadc3a3e0fb2631a9956d272173ae6b206e5226c58d4257509ad
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.shapes import SimplePore, Cylinder
# Integration test for simple pore
# The rationale is to hit the pore everywhere with particles
# and check that it does not blow up. The cylinder is needed
# because the pore is tilted with respect to the box, without
# it particles could enter the constraint over the periodic boundaries,
# leading to force jumps.
@utx.skipIfMissingFeatures(["LENNARD_JONES"])
class SimplePoreConstraint(ut.TestCase):
def test_orientation(self):
pore = SimplePore(axis=[1., 0., 0.], radius=2., smoothing_radius=.1,
length=2., center=[5., 5., 5.])
d, _ = pore.calc_distance(position=[.0, .0, .0])
self.assertGreater(d, 0.)
d, _ = pore.calc_distance(position=[5., 5., .0])
self.assertLess(d, 0.)
def test_stability(self):
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
box_yz = 15.
box_x = 20.
s.box_l = [box_x, box_yz, box_yz]
s.time_step = 0.01
s.cell_system.skin = 0.4
lj_eps = 1.0
lj_sig = 1.0
lj_cut = lj_sig * 2**(1. / 6.)
s.constraints.add(
particle_type=0, penetrable=False, only_positive=False,
shape=SimplePore(
axis=[1., 0.5, 0.5], radius=3., smoothing_radius=.1,
length=5, center=[.5 * box_x, .5 * box_yz, .5 * box_yz]))
s.constraints.add(
particle_type=0, penetrable=False, only_positive=False,
shape=Cylinder(
axis=[1., 0, 0], radius=0.5 * box_yz, length=4 * lj_cut + box_x,
center=[.5 * box_x, .5 * box_yz, .5 * box_yz], direction=-1))
s.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
for i in range(200):
rpos = [i * (box_x / 200.), 0.5 * box_yz, 0.5 * box_yz]
s.part.add(pos=rpos, type=1, v=[1., 1., 1.])
start_energy = s.analysis.energy()['total']
s.integrator.run(1000)
end_energy = s.analysis.energy()['total']
rel_diff = abs(end_energy - start_energy) / start_energy
self.assertLess(rel_diff, 1e-3)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/simple_pore.py
|
Python
|
gpl-3.0
| 3,020
|
[
"ESPResSo"
] |
470aa1efb6d07874aea074df8c1cc5252a7b90f3612f5870b0fe96ac0990f3ea
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation methods related to volume based on cclib data."""
from __future__ import print_function
import copy
import numpy
try:
from PyQuante.CGBF import CGBF
from cclib.bridge import cclib2pyquante
module_pyq = True
except:
module_pyq = False
try:
from pyvtk import *
from pyvtk.DataSetAttr import *
module_pyvtk = True
except:
module_pyvtk = False
from cclib.parser.utils import convertor
class Volume(object):
"""Represent a volume in space.
Required parameters:
origin -- the bottom left hand corner of the volume
topcorner -- the top right hand corner
spacing -- the distance between the points in the cube
Attributes:
data -- a numpy array of values for each point in the volume
(set to zero at initialisation)
numpts -- the numbers of points in the (x,y,z) directions
"""
def __init__(self, origin, topcorner, spacing):
self.origin = origin
self.spacing = spacing
self.topcorner = topcorner
self.numpts = []
for i in range(3):
self.numpts.append(int((self.topcorner[i]-self.origin[i])/self.spacing[i] + 1) )
self.data = numpy.zeros( tuple(self.numpts), "d")
def __str__(self):
"""Return a string representation."""
return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner,
self.spacing)
def write(self, filename, format="Cube"):
"""Write the volume to file."""
format = format.upper()
        if format not in ["VTK", "CUBE"]:
            raise ValueError("Format must be either VTK or Cube")
elif format=="VTK":
self.writeasvtk(filename)
else:
self.writeascube(filename)
def writeasvtk(self, filename):
if not module_pyvtk:
raise Exception("You need to have pyvtk installed")
ranges = (numpy.arange(self.data.shape[2]),
numpy.arange(self.data.shape[1]),
numpy.arange(self.data.shape[0]))
v = VtkData(RectilinearGrid(*ranges), "Test",
PointData(Scalars(self.data.ravel(), "from cclib", "default")))
v.tofile(filename)
def integrate(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()) * boxvol
def integrate_square(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()**2) * boxvol
def writeascube(self, filename):
# Remember that the units are bohr, not Angstroms
convert = lambda x : convertor(x, "Angstrom", "bohr")
ans = []
ans.append("Cube file generated by cclib")
ans.append("")
format = "%4d%12.6f%12.6f%12.6f"
origin = [convert(x) for x in self.origin]
ans.append(format % (0, origin[0], origin[1], origin[2]))
ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0))
ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0))
ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2])))
line = []
for i in range(self.data.shape[0]):
for j in range(self.data.shape[1]):
for k in range(self.data.shape[2]):
line.append(scinotation(self.data[i][j][k]))
if len(line)==6:
ans.append(" ".join(line))
line = []
if line:
ans.append(" ".join(line))
line = []
outputfile = open(filename, "w")
outputfile.write("\n".join(ans))
outputfile.close()
def scinotation(num):
"""Write in scientific notation
>>> scinotation(1./654)
' 1.52905E-03'
>>> scinotation(-1./654)
'-1.52905E-03'
"""
ans = "%10.5E" % num
broken = ans.split("E")
exponent = int(broken[1])
if exponent<-99:
return " 0.000E+00"
if exponent<0:
sign="-"
else:
sign="+"
return ("%sE%s%s" % (broken[0],sign,broken[1][-2:])).rjust(12)
def getbfs(coords, gbasis):
"""Convenience function for both wavefunction and density based on PyQuante Ints.py."""
    mymol = cclib2pyquante.makepyquante(coords, [0 for x in coords])
sym2powerlist = {
'S' : [(0,0,0)],
'P' : [(1,0,0),(0,1,0),(0,0,1)],
'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)],
'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2),
(0,3,0),(0,2,1),(0,1,2), (0,0,3)]
}
bfs = []
for i,atom in enumerate(mymol):
bs = gbasis[i]
for sym,prims in bs:
for power in sym2powerlist[sym]:
bf = CGBF(atom.pos(),power)
for expnt,coef in prims:
bf.add_primitive(expnt,coef)
bf.normalize()
bfs.append(bf)
return bfs
def wavefunction(coords, mocoeffs, gbasis, volume):
"""Calculate the magnitude of the wavefunction at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for one eigenvalue
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
"""
bfs = getbfs(coords, gbasis)
wavefn = copy.copy(volume)
wavefn.data = numpy.zeros( wavefn.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion
for bs in range(len(bfs)):
data = numpy.zeros( wavefn.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
for k,zval in enumerate(z):
data[i, j, k] = bfs[bs].amp(xval,yval,zval)
numpy.multiply(data, mocoeffs[bs], data)
numpy.add(wavefn.data, data, wavefn.data)
return wavefn
def electrondensity(coords, mocoeffslist, gbasis, volume):
"""Calculate the magnitude of the electron density at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for all of the occupied eigenvalues
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
Note: mocoeffs is a list of numpy arrays. The list will be of length 1
for restricted calculations, and length 2 for unrestricted.
"""
bfs = getbfs(coords, gbasis)
density = copy.copy(volume)
density.data = numpy.zeros( density.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion
y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion
z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion
for mocoeffs in mocoeffslist:
for mocoeff in mocoeffs:
wavefn = numpy.zeros( density.data.shape, "d")
for bs in range(len(bfs)):
data = numpy.zeros( density.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
tmp = []
for k,zval in enumerate(z):
tmp.append(bfs[bs].amp(xval, yval, zval))
data[i,j,:] = tmp
numpy.multiply(data, mocoeff[bs], data)
numpy.add(wavefn, data, wavefn)
density.data += wavefn**2
if len(mocoeffslist) == 1:
density.data = density.data*2. # doubly-occupied
return density
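# Illustrative usage note (not from the original module; names mirror the
# __main__ block below): for a restricted calculation, pass a one-element list,
# e.g. electrondensity(atomcoords, [mocoeffs[0][:homos[0] + 1]], gbasis, vol),
# so that the doubly-occupied factor of 2 above is applied.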
if __name__=="__main__":
try:
import psyco
psyco.full()
except ImportError:
pass
from cclib.io import ccopen
import logging
a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
a.logger.setLevel(logging.ERROR)
c = a.parse()
b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
b.logger.setLevel(logging.ERROR)
d = b.parse()
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
c.gbasis, vol)
assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns
print(wavefn.integrate(), wavefn.integrate_square())
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]]
density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol)
assert abs(density.integrate()-8.00)<1E-2
print("Combined Density of 4 Frontier orbitals=",density.integrate())
|
Schamnad/cclib
|
src/cclib/method/volume.py
|
Python
|
bsd-3-clause
| 9,899
|
[
"Gaussian",
"VTK",
"cclib"
] |
c1df094f373b02fd5f7f6842417993189684d8ee5561dda5d459fe650a724fc1
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espressopp.esutil import pmiimport
pmiimport('espressopp.integrator')
from espressopp.integrator.MDIntegrator import *
from espressopp.integrator.VelocityVerlet import *
from espressopp.integrator.VelocityVerletOnGroup import *
from espressopp.integrator.Isokinetic import *
from espressopp.integrator.StochasticVelocityRescaling import *
from espressopp.integrator.TDforce import *
from espressopp.integrator.FreeEnergyCompensation import *
from espressopp.integrator.OnTheFlyFEC import *
from espressopp.integrator.Extension import *
from espressopp.integrator.Adress import *
from espressopp.integrator.BerendsenBarostat import *
from espressopp.integrator.BerendsenBarostatAnisotropic import *
from espressopp.integrator.BerendsenThermostat import *
from espressopp.integrator.LangevinThermostat import *
from espressopp.integrator.LangevinThermostatHybrid import *
from espressopp.integrator.LangevinThermostat1D import *
from espressopp.integrator.GeneralizedLangevinThermostat import *
from espressopp.integrator.LangevinThermostatOnGroup import *
from espressopp.integrator.DPDThermostat import *
from espressopp.integrator.LangevinBarostat import *
from espressopp.integrator.FixPositions import *
from espressopp.integrator.LatticeBoltzmann import *
from espressopp.integrator.LBInit import *
from espressopp.integrator.LBInitConstForce import *
from espressopp.integrator.LBInitPeriodicForce import *
from espressopp.integrator.LBInitPopUniform import *
from espressopp.integrator.LBInitPopWave import *
from espressopp.integrator.ExtForce import *
from espressopp.integrator.CapForce import *
from espressopp.integrator.ExtAnalyze import *
from espressopp.integrator.Settle import *
from espressopp.integrator.Rattle import *
from espressopp.integrator.VelocityVerletOnRadius import *
from espressopp.integrator.AssociationReaction import *
from espressopp.integrator.EmptyExtension import *
from espressopp.integrator.MinimizeEnergy import *
|
fedepad/espressopp
|
src/integrator/__init__.py
|
Python
|
gpl-3.0
| 2,833
|
[
"ESPResSo"
] |
09929ecb6ef776e41581b8c84278d3b6f4b9ad944b0832d136ca9cd0bcd5142a
|
# -*- coding:utf-8 -*-
"""PyHdust *spectools* module: spectroscopy tools
Some definitions: for all the routines to work, every spectrum must be grouped
under the same path (`path`), organized in a night/star/spectrum structure.
Currently the functions only read ``*.cal.fits`` files. The ``.cal`` suffix means
a header with the following keywords:
* 'MJD-OBS' or 'MJD' or 'JD' or 'DATE-OBS'
* 'CRVAL1' + 'CDELT1'
A useful tool for normalizing spectra with Python (not used/imported here):
https://python4esac.github.io/plotting/specnorm.html
:license: GNU GPL v3.0 https://github.com/danmoser/pyhdust/blob/master/LICENSE
"""
from __future__ import print_function
import os as _os
import numpy as _np
import datetime as _dt
# import time as _time
from glob import glob as _glob
# from itertools import product as _iproduct
import pyhdust.phc as _phc
import pyhdust.jdcal as _jdcal
import pyhdust.input as _inp
import pyhdust.stats as _stt
import pyhdust as _hdt
from six import string_types as _strtypes
from shutil import copyfile as _copyfile
import warnings as _warn
import requests as _requests
import astropy.io.fits as _pyfits
# from lmfit import Model as _Model
try:
# import astropy.coordinates.sky_coordinate.SkyCoord as _SkyCoord
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import matplotlib.patches as _mpatches
from matplotlib.ticker import MaxNLocator as _MaxNLocator
import matplotlib.gridspec as _gridspec
import scipy.interpolate as _interpolate
from scipy.optimize import curve_fit as _curve_fit
from scipy.stats import percentileofscore as _pos
from astropy.modeling import models as _models
from astropy.modeling import fitting as _fitting
import pandas as _pd
import wget as _wget
import xmltodict as _xmltodict
except ImportError:
_warn.warn('matplotlib, scipy and/or pandas module not installed!!!')
#try:
# import pyqt_fit.nonparam_regression as _smooth
# from pyqt_fit import npr_methods as _npr_methods
#except ImportError:
# _warn.warn('pyqt_fit module not installed!!!')
__author__ = "Daniel Moser"
__email__ = "dmfaes@gmail.com"
_outfold = ''
class Spec(object):
"""Definicao de classe espectro para conter toda a informacao util
para plots e analises.
EW in km/s
    To configure one or more lines:
>>> spdtb = Spec()
>>> spdtb.lbc == 0
>>> #significa que vetor wl eh vetor velocidades, e nao comprimento de
>>> # onda.
>>> spdtb.lbc = 6564.
>>> spdtb2 = Spec()
>>> spdtb2.lbc = 4863.
    How to use it (the hard way):
>>> spdtb = Spec()
>>> #read spec `flux` and `wl` for a given `lbc`
>>> (spdtb.EW, spdtb.EC, spdtb.VR, spdtb.peaksep, spdtb.depthcent,\\
>>> spdtb.F0) = analline(wl, flux, lbc)
>>> spdtb.MJD = 1
>>> spdtb.file = file
And then:
>>> #to record it to the database:
>>> spdtb.addspec()
    To load a previous table, do:
>>> spdtb = Spec()
>>> #(...) read new specs and then join with previous ones
>>> spdtb.data = _np.vstack((spdtb.data, _np.loadtxt('hdt/datafile.txt')))
>>> spdtb.metadata = _np.vstack(( spdtb.metadata, \\
>>> _np.loadtxt('hdt/metafile.txt') ))
>>> spdtb.updatecount() #to update the counter
    Or simply (default file names):
>>> spdtb.loaddata()
"""
def __init__(self, wl=None, flux=None, lbc=None, hwidth=1000., EW=_np.NaN,
EC=_np.NaN, VR=_np.NaN, peaksep=_np.NaN, depthcent=_np.NaN, F0=_np.NaN,
dateobs='', MJD=0., datereduc='', file='', gaussfit=False):
self.wl = wl
self.flux = flux
self.lbc = lbc
self.hwidth = hwidth
self.EW = EW
self.EC = EC
self.VR = VR
self.peaksep = peaksep
self.depthcent = depthcent
self.F0 = F0
self.file = file
self.datereduc = datereduc
self.dateobs = dateobs
self.MJD = MJD
self.count = 0
self.data = _np.empty(0)
self.metadata = _np.empty(0)
self.gaussfit = gaussfit
def reset(self):
"""Reset the class parameters
"""
self.wl = None
self.flux = None
self.EW = _np.NaN
self.EC = _np.NaN
self.VR = _np.NaN
self.peaksep = _np.NaN
self.depthcent = _np.NaN
self.F0 = _np.NaN
self.file = ''
self.datereduc = ''
self.dateobs = ''
self.MJD = 0.
def clear(self):
"""Clear the class parameters
"""
self.__init__()
def addspec(self):
"""Record the class parameters into the database
"""
self.count += 1
if self.count == 1:
self.data = _np.array( self.lastinfo() )
self.metadata = _np.array( self.lastmeta() )
else:
self.data = _np.vstack(( self.data, self.lastinfo() ))
self.metadata = _np.vstack(( self.metadata, self.lastmeta() ))
# if self.flux != None and self.wl != None and self.lbc != None:
# self.savespec()
def lastinfo(self):
"""Print the current class parameters (last spec)
"""
return self.MJD, self.EW, self.EC, self.VR, self.peaksep, \
self.depthcent, self.F0
def lastmeta(self):
"""Print the current class parameters (last spec)
"""
return self.MJD, self.dateobs, self.datereduc, self.file
def savedata(self, datafile=_outfold + '/datafile.txt',
metafile=_outfold + '/metafile.txt'):
"""Save current table
"""
header = ['MJD', 'EW', 'EC', 'VR', 'peaksep', 'depthcent', 'F0']
_np.savetxt(datafile, self.data, fmt='%12.6f',
header=(len(header) * '{:>12s}').format(*header))
_np.savetxt(metafile, self.metadata, fmt='%s', delimiter=',')
return
def loaddata(self, datafile=_outfold + '/datafile.txt',
metafile=_outfold + '/metafile.txt'):
"""Function to load a previous table
Usage:
>>> spdtb = Spec()
>>> spdtb.loaddata()
"""
self.data = _np.loadtxt(datafile)
if _os.path.exists(metafile):
self.metadata = _np.genfromtxt(metafile, dtype='str',
delimiter=',')
self.updatecount()
return
def updatecount(self, num=0):
if num > 0:
self.count = num
else:
self.count = len(self.data)
return
def loadspec(self, file):
"""Load a fits file (parameters `wl`, `flux`, `MJD`, `dateobs`,
        `datereduc` and `file`).
        Currently, only compatible with standard FITS files.
"""
if file.find('.fit') == -1:
_warn.warn("# ERROR! `loadspec` unrecognized format!")
return
(self.wl, self.flux, self.MJD, self.dateobs, self.datereduc,
self.file) = loadfits(file)
(self.EW, self.EC, self.VR, self.peaksep, self.depthcent, self.F0) = \
analline(self.wl, self.flux, self.lbc, hwidth=self.hwidth,
verb=False, gaussfit=self.gaussfit)
return
def plotspec(self, outname=''):
"""Export current spec into a PNG file.
"""
if self.wl is None or self.flux is None:
_warn.warn('wrong Spec() parameters! {0}'.format(self.file))
return
if outname == '':
path, file = _phc.trimpathname(self.file)
outname = _phc.rmext(file)
# Normalization:
flux = linfit(self.wl, self.flux)
wl = self.wl
fig = _plt.figure()
ax = fig.add_subplot(111)
ax.plot(wl, flux)
ax.set_ylabel('norm. flux')
ax.set_xlabel('wavelength (arb. units)')
ax.set_title(outname)
_plt.savefig('{0}/{1:.2f}_{2}.png'.format(_outfold, self.MJD, outname))
if self.lbc > 0:
vels = (self.wl - self.lbc) / self.lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= self.hwidth)
flux = linfit(vels[idx], flux[idx])
vels = vels[idx]
_plt.clf()
ax = fig.add_subplot(111)
ax.plot(vels, flux)
ax.set_ylabel('norm. flux')
ax.set_xlabel('vel. (km/s)')
ax.set_title('{0:.2f} {1} {2:.2f}'.format(self.MJD, outname,
self.lbc))
_plt.savefig('{0}/{1:.2f}_{2}_{3:.2f}.png'.format(_outfold,
self.MJD, outname, self.lbc))
_plt.close()
return
def shiftfits(fitsfile, newsh=None, verbose=False):
""" Update FITS spec header for a given shift value. """
imfits = _pyfits.open(fitsfile, mode='update')
if 'WLSHIFT' in imfits[0].header:
if verbose:
print('# WLSHIFT = {0} for {1}'.format(imfits[0].header['WLSHIFT'],
_phc.trimpathname(fitsfile)[1]))
else:
if verbose:
print('# No WLSHIFT available for {0}'.format(
_phc.trimpathname(fitsfile)[1]))
if newsh is None:
newsh = _phc.user_input('Type the new shift: ')
if newsh != '':
imfits[0].header['WLSHIFT'] = float(newsh)
imfits.close()
return
def checkshiftfits(fitslist, lbc=6562.8):
""" Do *shiftfits* sistematically
INPUT: list of files
OUTPUT: fits files header updated with WLSHIFT.
"""
fig, ax = _plt.subplots()
for f in fitslist:
data = loadfits(f)
vel, flx = lineProf(data[0], data[1], lbc=lbc)
good = False
imfits = _pyfits.open(f)
if 'WLSHIFT' in imfits[0].header:
shift0 = float(imfits[0].header['WLSHIFT'])
else:
shift0 = 0.
shift = 0
while not good:
ax.plot([0, 0], [0.7, 1.2], ls='--', color='gray')
veli = vel + shift*3e5/lbc
ax.plot(veli, flx)
_plt.show()
_plt.draw()
ri = _phc.user_input('\n# Is it good?(y/other): ')
if ri != 'y':
try:
shift = float(_phc.user_input('Type shift: '))
except ValueError:
shift = 0.
else:
good = True
ax.cla()
if shift != 0:
shiftfits(f, newsh=shift+shift0)
_plt.close(fig)
return
def loadfits(fitsfile):
"""load FITS spec
Out: wl, flux, MJD, dateobs, datereduc, fitsfile
"""
imfits = _pyfits.open(fitsfile)
flux = imfits[0].data
wl = _np.arange(len(flux)) * imfits[0].header['CDELT1'] +\
imfits[0].header['CRVAL1']
(MJD, dateobs, datereduc) = (0., '', '')
dtinfo = False
if not dtinfo and 'MJD-OBS' in imfits[0].header:
MJD = float(imfits[0].header['MJD-OBS'])
dtinfo = True
if not dtinfo and 'MJD' in imfits[0].header:
MJD = float(imfits[0].header['MJD'])
dtinfo = True
if not dtinfo and 'JD' in imfits[0].header:
if isinstance(imfits[0].header['JD'], _strtypes):
if len(imfits[0].header['JD']) > 0:
MJD = float(imfits[0].header['JD']) - 2400000.5
dtinfo = True
else:
MJD = imfits[0].header['JD'] - 2400000.5
dtinfo = True
if not dtinfo and 'DATE-OBS' in imfits[0].header:
if len(imfits[0].header['DATE-OBS']) > 0:
dtobs = imfits[0].header['DATE-OBS']
dtobs, tobs = check_dtobs(dtobs)
MJD = _jdcal.gcal2jd(*dtobs)[1] + tobs
dtinfo = True
if not dtinfo and 'FRAME' in imfits[0].header:
dtobs = imfits[0].header['FRAME']
dtobs, tobs = check_dtobs(dtobs)
MJD = _jdcal.gcal2jd(*dtobs)[1] + tobs
dtinfo = True
if not dtinfo:
MJD = _jdcal.MJD_JD2000
_warn.warn('No DATE-OBS information is available! {0}\nAssuming '
'MJD_JD2000'.format(fitsfile))
if 'DATE-OBS' in imfits[0].header:
dateobs = imfits[0].header['DATE-OBS']
elif 'FRAME' in imfits[0].header:
dateobs = imfits[0].header['FRAME']
if 'IRAF-TLM' in imfits[0].header:
datereduc = imfits[0].header['IRAF-TLM']
elif 'DATE' in imfits[0].header:
datereduc = imfits[0].header['DATE']
if 'WLSHIFT' in imfits[0].header:
shift = float(imfits[0].header['WLSHIFT'])
wl += shift
imfits.close()
return wl, flux, MJD, dateobs, datereduc, fitsfile
def vac2air(wl):
"""The IAU standard for conversion from air to vacuum wavelengths is given
in Morton (1991, ApJS, 77, 119). For vacuum wavelengths (VAC) in Angstroms,
convert to air wavelength (AIR) via:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4 )
"""
return wl / (1.0 + 2.735182E-4 + 131.4182 / wl**2 + 2.76249E8 / wl**4 )
def air2vac(wl):
"""The IAU standard for conversion from air to vacuum wavelengths is given
in Morton (1991, ApJS, 77, 119). For vacuum wavelengths (VAC) in Angstroms,
convert to air wavelength (AIR) via:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4 )
Fitting the inverse curve:
VAC = AIR / (1.0 - 2.73443407E-4 - 1.31275255E2 / AIR^2 - 2.75708212E8 /
AIR^4 )
"""
return wl / (1.0 - 2.73443407e-04 - 1.31275255e+02 / wl**2 -
2.75708212e+08 / wl**4)
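# Quick sanity check (illustrative, not part of the original module; values are
# approximate): near Halpha, air2vac(6562.8) is roughly 6564.6 Angstrom, and
# vac2air(air2vac(6562.8)) recovers ~6562.8, i.e. the two conversions are
# near-inverses to well below the milli-Angstrom level.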
def vel2wl(vel, lbc):
""" Vel. to wavelength. Vel must be in km/s and output is in `lbc` units.
"""
wl = (vel / _phc.c.cgs * 1e5 + 1) * lbc
return wl
def wl2vel(wl, lbc):
""" Wavelength to vel., in km/s. `wl` and `lbc` units must be the same. """
vels = (wl - lbc) / lbc * _phc.c.cgs * 1e-5
return vels
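# Quick sanity check (illustrative, not part of the original module; values are
# approximate): with lbc = 6562.8 Angstrom, vel2wl(1000., 6562.8) is roughly
# 6584.7 Angstrom, and wl2vel(vel2wl(v, lbc), lbc) recovers v, since the two
# functions are inverses of each other.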
def hydrogenlinewl(ni, nf):
"""Generate H line transitions wavelengths in meters for VACUUM
Rydberg constant `R` was manually adjusted to fit Halpha and Hbeta lines.
"""
return (10967850. * (1. / nf**2 - 1. / ni**2))**-1.
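# Quick sanity check (illustrative, not part of the original module; value is
# approximate): hydrogenlinewl(3, 2) gives the vacuum Halpha wavelength, about
# 6.5646e-7 m (~6564.6 Angstrom), consistent with air2vac(6562.8) above.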
def calcres_R(hwidth=1350, nbins=108):
"""
(h)Width in km/s.
*WARNING*: `width` in HDUST input is only half.
    To get the HDUST effective R, multiply the input width by 2 here.
# R = lbd/Dlbd = _phc.c/Dv = _phc.c*nbins/width
# nbins = R*width/_phc.c
"""
return round(_phc.c.cgs * nbins / hwidth / 1e5)
def calcres_nbins(R=12000, hwidth=1350):
"""
(h)Width in km/s.
*WARNING*: `width` in HDUST input is only half.
    To get the HDUST effective R, multiply the input width by 2 here.
# R = lbd/Dlbd = _phc.c/Dv = _phc.c*nbins/width
# nbins = R*width/_phc.c
"""
return round(R * hwidth * 1e5 / _phc.c.cgs)
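# Quick sanity check (illustrative, not part of the original module; values are
# approximate): calcres_R(hwidth=1350, nbins=108) is roughly 24000 and
# calcres_nbins(R=12000, hwidth=1350) is roughly 54, following
# R = c * nbins / (hwidth * 1e5) with hwidth given in km/s.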
def lineProf(x, flx, lbc, flxerr=_np.empty(0), hwidth=1000., ssize=0.05):
'''
    lineProf() - returns a normalized flux array (flx) and an x array in
    VELOCITIES. `lbc` must be given in the same unit as x for the
    lambda -> velocity conversion. If the x vector is already in velocity,
    use the linfit() function instead.
    x matters because y may not be evenly sampled.
    x and y must be in increasing order.
    ssize = fraction of the length of y; number of points used at both ends
    to estimate the continuum average. 'ssize' ranges from .5 to 0 (exclusive).
    OUTPUT: vel (array), flx (array)
'''
x = (x - lbc) / lbc * _phc.c.cgs * 1e-5 # km/s
idx = _np.where(_np.abs(x) <= 1.001 * hwidth)
if len(flxerr) == 0:
flux = linfit(x[idx], flx[idx], ssize=ssize) # yerr=flxerr,
if len(x[idx]) == 0:
_warn.warn('Wrong `lbc` in the lineProf function')
return x[idx], flux
else:
flux, flxerr = linfit(x[idx], flx[idx], yerr=flxerr[idx], ssize=ssize)
if len(x[idx]) == 0:
_warn.warn('Wrong `lbc` in the lineProf function')
return x[idx], flux, flxerr
def linfit(x, y, ssize=0.05, yerr=_np.empty(0)):
r'''
    linfit() - returns a normalized array (y), at the positions of x.
    x matters because y may not be evenly sampled.
    x and y must be in increasing order.
    ssize = fraction of the length of y; number of points used at both ends
    to estimate the continuum average. 'ssize' ranges from .5 to 0 (exclusive).
OUTPUT: y, yerr (if given)
.. code:: python
#Example:
import numpy as np
import matplotlib.pyplot as plt
import pyhdust.phc as phc
import pyhdust.spectools as spt
wv = np.linspace(6500, 6600, 101)
flx = (np.arange(101)[::-1])/100.+1+phc.normgauss(4, x=wv,
xc=6562.79)*5
plt.plot(wv, flx)
normflx = spt.linfit(wv, flx)
plt.plot(wv, normflx, ls='--')
plt.xlabel(r'$\lambda$ ($\AA$)')
plt.ylabel('Flux (arb. unit)')
.. image:: _static/spt_linfit.png
:align: center
:width: 500
'''
ny = _np.array(y)[:]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 9:
medy0, medy1 = _np.median(ny[:ssize]), _np.median(ny[-ssize:])
else:
medy0, medy1 = _np.average(ny[:ssize]), _np.average(ny[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
idx = _np.where(new_y != 0)
ny[idx] = ny[idx] / new_y[idx]
if len(yerr) == 0.:
return ny
else:
yerr = yerr / _np.average(new_y)
return ny, yerr
def EWcalc(vels, flux, vw=1000):
"""
    Assumes the flux is already normalized and the vectors are ordered.
    Returns the EW value.
"""
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
ew = 0.
if len(outvels) < 3:
# normflux = _np.ones(len(outvels))
return ew
for i in range(len(outvels) - 1):
dl = outvels[i + 1] - outvels[i]
# print(dl)
ew += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
return ew
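# Note on the sign convention (illustrative, not part of the original module):
# since EWcalc integrates (1 - flux), an absorption profile (flux < 1) yields a
# positive EW while a pure emission profile yields a negative EW, both in the
# same units as `vels` (km/s here).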
def absLineCalc(vels, flux, vw=1000, ssize=0.05):
r"""
Calculate the line flux (input velocity vector). The `flux` is
NON-normalized.
    The ``ssize`` parameter controls the fraction of the flux evaluated at the
    extremes of the input flux array to determine the continuum level.
``vels = (wv - lbc) / lbc * phc.c.cgs * 1e-5 # km/s``
Output in the same flux units times :math:`\Delta v` (both flux and *v*
input units).
"""
idx = _np.where(_np.abs(vels) <= vw)
vels = vels[idx]
flux = flux[idx]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(flux))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(vels[:ssize]), _np.average(vels[-ssize:])
if ssize > 9:
medy0, medy1 = _np.median(flux[:ssize]), _np.median(flux[-ssize:])
else:
medy0, medy1 = _np.average(flux[:ssize]), _np.average(flux[-ssize:])
new_y = medy0 + (medy1 - medy0) * (vels - medx0) / (medx1 - medx0)
base = _np.trapz(new_y, vels)
line = _np.trapz(flux, vels)
return line - base
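# Note (illustrative, not part of the original module): absLineCalc returns the
# trapezoidal integral of the flux minus the integral of the linear continuum
# estimated from the `ssize` extremes, i.e. the net line flux over the +/- vw
# velocity window (positive for emission above the continuum).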
def gauss_fit(x, y, a0=None, x0=None, sig0=None, emission=True, ssize=0.05):
""" Return the area of a fitting Gaussian.
"""
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(y[:ssize]), _np.median(y[-ssize:])
else:
medy0, medy1 = _np.average(y[:ssize]), _np.average(y[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
q = 95
func = _np.max
if not emission:
func = _np.min
q = 5
if a0 is None:
a0 = _np.abs(_np.percentile(y-new_y, q)) - _np.median(y-new_y)
if x0 is None:
x0 = x[_np.where(y-new_y == func(y-new_y))]
if sig0 is None:
sig0 = (_np.max(x)-_np.min(x))/10.
g_init = _models.Gaussian1D(amplitude=a0, mean=x0, stddev=sig0)
g_init.bounds['amplitude'] = (0, 2*a0)
# g_init.verblevel = 0
fit_g = _fitting.LevMarLSQFitter()
# print(a0, x0, sig0, _np.shape(a0), _np.shape(x0), _np.shape(sig0),
# _np.shape(x), _np.shape(y))
g = fit_g(g_init, x, y-new_y)
# print(g.parameters[0])
return g, new_y
def absLineDeb(wv, flux, lb0, lb1, vw=1000, ssize=0.05, a0=None, sig0=None,
allout=False):
""" Return the area of a fitting Gaussian with debblending.
"""
lbc = _np.average([lb0, lb1])
vels = (wv - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= vw*(1+ssize))
x = wv[idx]
y = flux[idx]
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
nfxsig = _np.std(y)
emission = True
if _np.percentile(y, 5) + nfxsig < 1:
emission = False
if _np.percentile(y, 95) - 1.5*nfxsig > 1:
emission = True
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(y[:ssize]), _np.median(y[-ssize:])
else:
medy0, medy1 = _np.average(y[:ssize]), _np.average(y[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
q = 95
if not emission:
q = 5
if a0 is None:
a0 = _np.abs(_np.percentile(y-new_y, q)) - _np.median(y-new_y)
if sig0 is None:
sig0 = (_np.max(x)-_np.min(x))/10.
g1 = _models.Gaussian1D(a0, lb0, sig0)
g1.bounds['amplitude'] = (0, 2*a0)
g1.bounds['mean'] = (lb0*0.9, lb1*0.99)
g2 = _models.Gaussian1D(a0, lb1, sig0)
g1.bounds['amplitude'] = (0, a0)
g2.bounds['mean'] = (lb0*1.01, lb1*1.1)
gg_init = ( g1 + g2 )
# gg_init.verblevel = 0
fitter = _fitting.SLSQPLSQFitter()
gg = fitter(gg_init, x, y-new_y, verblevel=0)
# print(gg.parameters[0], gg.parameters[0+3])
if not allout:
return ( gg.parameters[0]*gg.parameters[2]*_np.sqrt(2*_np.pi),
gg.parameters[0+3]*gg.parameters[2+3]*_np.sqrt(2*_np.pi) )
else:
return gg, new_y, idx
def absLineCalcWave(wv, flux, lbc, vw=1000, ssize=0.05, gauss=False,
allout=False, spcas=None):
r"""
Calculate the line flux (input velocity vector). The `flux` is
NON-normalized.
    The ``ssize`` parameter controls the fraction of the flux evaluated at the
    extremes of the input flux array to determine the continuum level.
``vels = (wv - lbc) / lbc * phc.c.cgs * 1e-5 # km/s``
Output in the same flux units times :math:`\Delta v` (both flux and *v*
input units).
"""
vels = (wv - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= vw)
wv = wv[idx]
flux = flux[idx]
if not gauss:
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(flux))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(wv[:ssize]), _np.average(wv[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(flux[:ssize]), _np.median(flux[-ssize:])
else:
medy0, medy1 = ( _np.average(flux[:ssize]),
_np.average(flux[-ssize:]) )
new_y = medy0 + (medy1 - medy0) * (wv - medx0) / (medx1 - medx0)
if spcas is not None:
if spcas == 0:
idx = _np.where(wv > 25.95*1e4)
flux[idx] = new_y[idx]
elif spcas == 1:
idx = _np.where(wv < 25.95*1e4)
# print(len(idx[0]))
flux[idx] = new_y[idx]
base = _np.trapz(new_y, wv)
line = _np.trapz(flux, wv)
if not allout:
return line - base
else:
return line, base, idx
else:
# nflx = linfit(wv, flux)
nflx = flux
nfxsig = _np.std(nflx)
emission = True
if _np.percentile(nflx, 5) + nfxsig < 1:
emission = False
if _np.percentile(nflx, 95) - 1.5*nfxsig > 1:
emission = True
g, newy = gauss_fit(wv, nflx, emission=emission, ssize=ssize)
if not allout:
return g.parameters[0]*g.parameters[2]*_np.sqrt(2*_np.pi)
else:
return g, newy, idx
def ECcalc(vels, flux, ssize=.05, gaussfit=False, doublegf=True):
"""
    Assumes the flux is already normalized and the vectors are ordered.
If `gaussfit=False`, the single maximum value is taken.
If `gaussfit=True`, then a single (`doublegf=False`) or a double
(`doublegf=True`) Gaussian fit is performed over the line profile to
determine its maximum.
    Computes the peak of the line emission and returns the velocity at which
    it occurs.
"""
vels = _np.array(vels)
flux = _np.array(flux)
# if lncore > 0:
# idx = _np.where(_np.abs(vels) < lncore)
# vels = vels[idx]
# flux = flux[idx]
if len(flux) < 5:
return _np.NaN, 0.
if not gaussfit:
idx = _np.where(_np.max(flux) == flux)
if flux[idx][0] < 1:
return _np.NaN, 0.
if len(idx[0]) > 1:
idx = idx[0][0]
return flux[idx][0], vels[idx][0]
else:
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return _np.NaN, 0.
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1
#
ivc = _np.abs(vels - 0).argmin()
if doublegf:
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
try:
p0 = [1., vels[i0], 40.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 40.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
ECs = _np.array([coeff0[0] + 1., coeff1[0] + 1.])
EC = _np.max(ECs)
idx = _np.where(ECs == EC)[0]
# vel = _np.abs(coeff0[1] / 2) + _np.abs(coeff1[1] / 2)
if idx == 0:
vel = coeff0[1]
else:
vel = coeff1[1]
return EC, vel
except ValueError:
return _np.NaN, 0.
else:
try:
p0 = [1., 0, 40.]
coeff0, tmp = _curve_fit(gauss, vels, flux, p0=p0)
EC = coeff0[0] + 1.
return EC, coeff0[1]
except ValueError:
return _np.NaN, 0.
def VRcalc(vels, flux, vw=1000, gaussfit=False, ssize=0.05):
"""
    Computes the PEAK on both sides (blue/red, V/R) of the line, adjusting
    the rest velocity (TBD).
"""
    # compute and apply the rest-velocity correction
vc = 0.
vels += vc
    # check the vector size
if len(vels) < 5:
vw = 0
ew0, ew1 = (_np.NaN, _np.NaN)
return ew0, ew1, vc
    # cut at vw
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
#
ivc = _np.abs(outvels - 0).argmin()
if not gaussfit:
V = _np.max(normflux[:ivc])
R = _np.max(normflux[ivc:])
else:
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
# print('# Bad profile!')
return 0, 0, vc
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1.
#
ivc = _np.abs(vels - 0).argmin()
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
try:
p0 = [1., vels[i0], 40.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 40.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
V = coeff0[0] + 1.
R = coeff1[0] + 1.
        except (ValueError, RuntimeError):  # curve_fit raises RuntimeError on a failed fit
return 1., 1., vc
return V, R, vc
def PScalc(vels, flux, vc=0., ssize=.05, gaussfit=False):
"""
Calcula peak_separation
`doublegaussfit` = True, do it before and after zero velocity. False, use
maximum (default).
"""
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return _np.NaN, _np.NaN
vels += vc
ivc = _np.abs(vels - 0).argmin()
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
if not gaussfit:
return vels[i0], vels[i1]
else:
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1.
#
try:
p0 = [1., vels[i0], 20.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 20.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
return coeff0[1], coeff1[1]
        except (ValueError, RuntimeError):  # curve_fit raises RuntimeError on a failed fit
print('# PScalc error...')
# print vels[i0], flux[i0], vels[i1], flux[i1]
return 0, 0
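# Illustrative usage sketch (assumption, not from the original module):
# peak separation of a synthetic double-peaked profile with peaks at
# +-150 km/s; the maximum-based estimate (gaussfit=False) is used.
def _example_PScalc():
    vels = _np.linspace(-600., 600., 401)
    flux = (1. + 0.6 * _np.exp(-(vels + 150.)**2 / (2. * 50.**2)) +
        0.6 * _np.exp(-(vels - 150.)**2 / (2. * 50.**2)))
    v0, v1 = PScalc(vels, flux)
    return v1 - v0  # ~300 km/s for this toy profile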
def FWHM(vels, flux, halfmax, vmax=350., flxincr=.01):
""" Calc. FWHM (Full-Width at Half Maximum) based on the value of the
Half Maximum
TODO: Gaussfit"""
if len(vels) < 5 or len(flux) < 5:
_warn.warn('# No valid line profile for FHWM')
return _np.NaN
vels = _np.array(vels)
flux = _np.array(flux)
# remove vels bigger than maxvel
idx = _np.where(_np.abs(vels) < vmax)
vels = vels[idx]
flux = flux[idx]
difflx = _np.abs(flux - halfmax)
    # keep only points whose flux differs from halfmax by less than flxincr*i*halfmax
i = 0
idx = _np.where(difflx < halfmax * flxincr*i)
while len(vels[idx]) < 2:
i += 1
idx = _np.where(difflx < halfmax * flxincr*i)
vels = vels[idx]
difflx = difflx[idx]
#
# difvels: ordered vels based on the flux difference
# idx = _np.argsort(difflx)
# difvels = vels[idx][:4]
#
# difvels: ordered vels closest to the 0 vel.
idx = _np.argsort(_np.abs(vels))
difvels = vels[idx][:2]
return _np.sum(_np.abs(difvels))
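# Illustrative usage sketch (assumption): FWHM of a synthetic Gaussian
# emission line of sigma = 60 km/s; the expected value is ~2.355*60 ~ 141 km/s.
def _example_FWHM():
    vels = _np.linspace(-300., 300., 601)
    flux = 1. + _np.exp(-vels**2 / (2. * 60.**2))
    halfmax = 1.5  # profile value at half of the emission peak above the continuum
    return FWHM(vels, flux, halfmax, vmax=300.)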
def DCcalc(vels, flux, vmax=None, vc=0., ssize=0.05):
"""
Calculo, na presenca de emissao, da profundidade do reverso central.
Se fluxo máximo < 1.01*contínuo, retorna
TODO: gauss fit
Return flux at `vmax` (maximum considered velocity), and flux at `v0`.
Depth of the central reversal is `flux[ivmax] - flux[ivc]`.
"""
if len(flux) < 5:
return _np.NaN, _np.NaN
vels += vc
ivc = _np.abs(vels - 0).argmin()
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return flux[ivc], flux[ivc]
# if a vmax is not given...
    if not isinstance(vmax, (int, float)):  # `long` dropped: undefined in Python 3
vmax = _np.abs(flux - _np.max(flux)).argmin()
vmax = vels[vmax]
ivmax = _np.abs(vels - vmax).argmin()
return flux[ivmax], flux[ivc]
def analline(lbd, flux, lbdc, hwidth=1000, verb=True, gaussfit=False,
doublegf=True):
"""
Return the analysis of a line.
Both lbd and flux need to be ordered (a normalization IS FORCED).
lbd,lbdc must have the same unit, and width in km/s is required.
The line will be cutted so that the total DeltaLambda will be 2*width
if `lbdc` <= 0, lbd array is assumed to be a velocity array (in km/s)!
| EXAMPLE: Using sed2data. lbc = 0.6565 (halpha), obs = 1 (width==1000)
| analline(lbd=sed2data[obs,:,2], flux=sed2data[obs,:,3], lbc=lbc)
The EW is the equivalent width in km/s,
EC is the Emission over Continuum ratio,
VR ratio,
peaksep in km/s,
FWHM is the Full-Width at Half Maximum (emission as maximum)
F0 is the depth of rest wavelength normalized to the continuum
OUTPUT: EW, EC, VR, peaksep, FWHM, F0
"""
if lbdc > 0:
vels = (lbd - lbdc) / lbdc * _phc.c.cgs * 1e-5
else:
vels = lbd
    # check if the file has the desired info.
if vels[0] > -hwidth * .95 or vels[-1] < hwidth * .95:
if verb:
_warn.warn('spec out of range (wavelength)! Check hwidth!')
return _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN
idx = _np.where(_np.abs(vels) <= hwidth)
vels = vels[idx]
flux = flux[idx]
# Normalization:
flux = linfit(vels, flux)
# Output:
EW = EWcalc(vels, flux, vw=hwidth)
EC, velEC = ECcalc(vels, flux, gaussfit=gaussfit, doublegf=doublegf)
ew0, ew1, vc = VRcalc(vels, flux, vw=hwidth, gaussfit=gaussfit)
    if ew1 == 0 or _np.isnan(EC):
        VR = 1
    else:
        VR = ew0 / ew1
    if _np.isnan(EC):
        peaksep = _np.NaN
    else:
        vel0, vel1 = PScalc(vels, flux, gaussfit=gaussfit)
        peaksep = vel1 - vel0
    if _np.isnan(peaksep):
        EC = peaksep
        VR = peaksep
EC2, F0 = DCcalc(vels, flux, vmax=velEC)
# depthcent = EC2 - F0
if EC2 < 1:
EC2 = 1.
fwhm = FWHM(vels, flux, (EC2 + F0) / 2., vmax=_np.abs(velEC))
else:
fwhm = FWHM(vels, flux, EC/2, vmax=hwidth)
return EW, EC, VR, peaksep, fwhm, F0
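# Illustrative usage sketch (assumption): end-to-end analline call on a
# synthetic, velocity-sampled double-peaked profile. lbdc <= 0 tells analline
# that `lbd` is already a velocity array (km/s).
def _example_analline():
    vels = _np.linspace(-1200., 1200., 801)
    flux = (1. + 0.5 * _np.exp(-(vels + 150.)**2 / (2. * 60.**2)) +
        0.7 * _np.exp(-(vels - 150.)**2 / (2. * 60.**2)))
    # returns EW, EC, VR, peaksep, FWHM, F0
    return analline(vels, flux, lbdc=-1, hwidth=1000., verb=False)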
def kurlog(file=None, output=None):
""" Generate a list of teff and logg present in a Kurucz file.
If output is not specified, it is saved as `file`+.log """
if file is None:
file = _os.path.join(_hdt.hdtpath(), 'refs', 'fp00k0.pck')
teffs = []
loggs = []
fp = open(file)
for i, line in enumerate(fp):
if line.find('TEFF') > -1:
teffs += [float(line.split()[1])]
loggs += [float(line.split()[3])]
fp.close()
return teffs, loggs
def kuruczflux(teff, logg, wavrange=None):
""" Return fluxes from a Kurucz model.
Fluxes are in ergs/cm**2/s/hz/ster and wavelength in nm (wavrange must be
in nm).
As tabelas do Kurucz sao erg/s/sr/cm2/Hz. Entao, tem q multiplicar 4pi para
ter o fluxo observado. Abaixo, a conversao das unidades Kurucz para
erg/s/cm2/A usuais.
# erg/s/sr/cm2/Hz:
lK15k, K15k, info = spt.kuruczflux(5777, 3., range=[100,1000])
lK15k*= 1e1 #Ang
K15k = 2.99792458E+18*K15k*(lK15k)**-2*4*np.pi #erg/s/cm2/A
OUTPUT: wv, flux, info"""
kurfile = _os.path.join(_hdt.hdtpath(), 'refs', 'fp00k0.pck')
kurwvlines = (174 - 22)
kurflxcol = 10
# wave
read = _phc.readrange(kurfile, 22, 22 + kurwvlines)
wave = _np.array([val for line in read for val in line.split()],
dtype=float)
# choose best
bestT = _np.inf
bestg = _np.inf
fp = open(kurfile)
for i, line in enumerate(fp):
if line.find('TEFF') > -1:
readT = float(line.split()[1])
if _np.abs(readT - teff) <= _np.abs(bestT - teff):
bestT = readT
readg = float(line.split()[3])
if _np.abs(readg - logg) <= _np.abs(bestg - logg):
bestg = readg
i0 = i + 1
fp.close()
best = [bestT, bestg]
# read best flux
read = _phc.readrange(kurfile, i0, i0 + kurwvlines)
flux = _np.array([val for line in read for val in
(line[i:i + kurflxcol] for i in range(0, len(line) - 1, kurflxcol))],
dtype=float)
# cut range
if wavrange is None:
return wave, flux, best
else:
idx = _np.where((wave > wavrange[0]) & (wave < wavrange[-1]))
return wave[idx], flux[idx], best
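# Illustrative usage sketch (assumption): picking the closest Kurucz model and
# converting its flux from erg/s/sr/cm2/Hz to erg/s/cm2/A, following the
# conversion quoted in the kuruczflux docstring. It assumes the reference
# grid file 'fp00k0.pck' shipped with the package is available.
def _example_kuruczflux():
    wv_nm, flx_hz, best = kuruczflux(5777., 4.5, wavrange=[100., 1000.])
    wv_aa = wv_nm * 10.  # nm -> Angstrom
    flx_aa = 2.99792458e18 * flx_hz * wv_aa**-2 * 4 * _np.pi  # erg/s/cm2/A
    return wv_aa, flx_aa, best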
def plot_all(fs2list, obsl=None, fmt=['png'], out=None, lbc=.6564,
hwidth=1000., solidfiles=True, xax=0, philist=[0], figname=None,
nolabels=False, obsidx=False):
r""" plot_all-like routine
``obsl`` list, in degrees. It will find the closest values. It the find
:math:`\Delta\theta > 3^\circ`, a warning message is displayed. The ``obs``
index can be used if ``obsidx = True``.
``solinefiles`` keep solid lines for files (changes only colors), and
change line shapes between observers. If ``False``, do the opposite.
"""
if isinstance(fs2list, _strtypes):
fs2list = [fs2list]
if not isinstance(obsl, list) and obsl is not None:
_warn.warn('Wrong `obsl` format (None or list)', stacklevel=2)
return
fig = _plt.figure(figsize=(9, 9))
lins, cols = (3, 2)
gs = _gridspec.GridSpec(lins, cols)
gs.update(hspace=0.25)
axt = _plt.subplot(gs[0, 1])
ax0 = _plt.subplot(gs[1, 0])
ax1 = _plt.subplot(gs[1, 1])
ax2 = _plt.subplot(gs[2, 0])
ax3 = _plt.subplot(gs[2, 1])
xtitle = 'radial scale'
for f in fs2list:
m = _inp.HdustMod(f)
tfile = _os.path.join(m.proj, m.modn, m.modn+m.suf+'*avg.temp')
tfile = _glob(tfile)
if len(tfile) > 0:
npt, rplus, lev = (0, 0, 0)
tfile.sort()
tfile = tfile[-1]
ncr, ncmu, ncphi, nLTE, nNLTE, Rstar, Ra, beta, data, pcr, pcmu, \
pcphi = _hdt.readtemp(tfile)
for phiidx in range(0, len(philist)):
icphi = philist[phiidx]
x = data[0, :, 0, icphi]
if (xax == 0):
x = _np.log10(x / Rstar - 1.)
xtitle = r'$\log_{10}(r/R_*-1)$'
elif (xax == 1):
x = x / Rstar
xtitle = r'$r/R_*$'
elif (xax == 2):
x = 1. - Rstar / x
xtitle = r'$1-R_*/r$'
                y = data[3 + lev, :, ncmu // 2 + npt + rplus, icphi]
y = y / 1000.
axt.plot(x, y, 'o-')
fs2d = _hdt.readfullsed2(f)
iobs = range(len(fs2d))
if obsl is not None:
if not obsidx:
iobs = [_phc.find_nearest(_np.arccos(fs2d[:, 0, 0])*180/_np.pi,
ob, idx=True) for ob in obsl]
else:
iobs = obsl
for ob in iobs:
obfmt = r'{:.1f}$^\circ$, {:.1f}$^\circ$'.format(_np.arccos(
fs2d[ob, 0, 0])*180/_np.pi, _np.arccos(fs2d[ob, 0, 1]))
if solidfiles:
pdict = {'color': _phc.cycles(fs2list.index(f)),
'dashes': _phc.dashes(iobs.index(ob))}
else:
pdict = {'dashes': _phc.dashes(fs2list.index(f)),
'color': _phc.cycles(iobs.index(ob))}
ax0.plot(fs2d[ob, :, 2], fs2d[ob, :, 3],
label=_os.path.basename(f), **pdict)
ax1.plot(fs2d[ob, :, 2], fs2d[ob, :, 3],
label=obfmt, **pdict)
ax2.plot(fs2d[ob, :, 2], fs2d[ob, :, 7]*100, **pdict)
ax3.plot(*lineProf(fs2d[ob, :, 2], fs2d[ob, :, 3], lbc=lbc,
hwidth=hwidth), **pdict)
axt.set_xlabel(xtitle, labelpad=1)
axt.set_ylabel(r'Temperature (10$^3$ K)')
ax0.set_xlim([.37, 1.])
ax0.autoscale(axis='y', tight=True)
ax0.set_yscale('log')
ax0.set_xlabel(r'$\mu$m')
ax0.set_ylabel(r'$\lambda F_\lambda/F$')
ax1.set_xlim([1., 100.])
ax1.autoscale(axis='y', tight=True)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel(r'$\mu$m', labelpad=1)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.yaxis.set_ticks_position('both')
ax1.set_ylabel(r'$\lambda F_\lambda/F$')
ax2.set_xlim([.37, .9])
ax2.autoscale(axis='y', tight=True)
ax2.set_xlabel(r'$\mu$m')
ax2.set_ylabel('P (%)')
ax3.set_xlim([-hwidth, hwidth])
ax3.set_xlabel(r'km/s')
ax3.yaxis.tick_right()
ax3.yaxis.set_label_position("right")
ax3.yaxis.set_ticks_position('both')
ax3.set_ylabel('Normalized Flux')
if not nolabels:
ax1.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=9,
labelspacing=0.05)
if len(fs2list) > 1 and not nolabels:
ax0.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=8,
labelspacing=0.05)
_phc.savefig(fig, fmt=fmt, figname=figname) # figname='outname')
return
def splitKurucz(filen, path=None):
"""
Split atmospheric Kurucz file (e.g., 'ap00k0.dat') into individual models.
INPUT: file, path (strings)
OUTPUT: *files written
"""
if path is None:
path = _os.getcwd()
allk = _np.loadtxt(filen, dtype=str, delimiter='\n')
src = _os.path.splitext(_os.path.split(filen)[1])[0]
if not _os.path.exists(src):
_os.mkdir(src)
src = _os.path.join(src, src)
for i in range(0, len(allk) - 1):
if 'EFF' in allk[i]:
iref = i
teff = int(allk[i].split()[1][:-1])
logg = float(allk[i].split()[3][:-3])
elif 'DECK6 72' in allk[i]:
allk[i] = allk[i].replace('DECK6 72', 'DECK6 71')
elif 'EFF' in allk[i + 1]:
_np.savetxt(src+'tef%05dg%.1f.dat' % (teff, logg),
allk[iref:i + 1], fmt='%s')
_np.savetxt(src+'tef%05dg%.1f.dat' % (teff, logg), allk[iref:], fmt='%s')
return
def writeFits(flx, lbd, extrahead=None, savename=None, verbose=False,
path=None, lbdc=None, externhd=None):
""" Write a 1D spectra FITS.
| INPUT: flux array, lbd array, extrahead flag+info, save name.
| - lbd array: if len(lbd)==2: lbd = [CRVAL1, CDELT1]
| else: CDELT1 = (lbd[-1]-lbd[0])/(len(lbd)-1)
| CRVAL1 = lbd[0]
| WARNING: lbd must be in ANGSTROMS (FITS default). It can also be
| velocities. In this case, it must be in km/s and lbdc is given in
| ANGSTROM.
| - extrahead: matrix (n,2). Example: [['OBJECT','Achernar'], ['COMMENT',
| 'value']]
`externhd` = copy the header from an external file.
OUTPUT: write FITS file.
"""
if path is None or path == '':
path = _os.getcwd()
    if path[-1] != '/':
path += '/'
if lbdc is not None:
lbd = (lbd / _phc.c.cgs * 1e5 + 1) * lbdc
hdu = _pyfits.PrimaryHDU(flx)
hdulist = _pyfits.HDUList([hdu])
if externhd is not None:
extf = _pyfits.open(externhd)
hdulist[0].header = extf[0].header
hdulist[0].header['BZERO'] = 0.
hdulist[0].header['CRVAL1'] = lbd[0]
if len(lbd) == 2:
hdulist[0].header['CDELT1'] = lbd[1]
else:
hdulist[0].header['CDELT1'] = (lbd[-1] - lbd[0]) / (len(lbd) - 1)
if extrahead is not None:
for e in extrahead:
hdulist[0].header[e[0]] = e[1]
if savename is None:
savename = 'spec_{0}'.format(_phc.dtflag())
if savename.find('.fit') == -1:
savename += '.fits'
hdu.writeto(path + savename, overwrite=True)
if verbose:
print('# FITS file {0}{1} saved!'.format(path, savename))
return
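# Illustrative usage sketch (assumption): writing a flat synthetic spectrum
# between 6500 and 6600 Angstrom to 'example_spec.fits' in the current
# directory; the OBJECT keyword value is arbitrary.
def _example_writeFits():
    lbd = _np.linspace(6500., 6600., 1001)  # Angstrom
    flx = _np.ones(len(lbd))
    writeFits(flx, lbd, savename='example_spec.fits',
        extrahead=[['OBJECT', 'synthetic']])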
def averagespecs(speclist, n=999, path='', objname='OBJECT'):
""" Average specs taken in the same MJD, in groups of approx. `n`
elements.
OUTPUT: Files written. """
if len(path) > 0 and path[-1] != '/':
path += '/'
speclist = _np.array(speclist)
obsdates = []
for sp in speclist:
data = loadfits(sp)
obsdates.append(data[2])
obsdates = _np.array(obsdates)
# Sorting things
idx = _np.argsort(obsdates)
speclist = speclist[idx]
obsdates = obsdates[idx]
# Same day
iMJD = []
for m in obsdates:
iMJD.append(divmod(m, 1)[0])
idxMJD = _np.unique(iMJD)
# Do the avgs based on the MJD
for i in idxMJD:
idx = _np.where(iMJD == i)
N = len(speclist[idx])
for j in _phc.splitequal(N/n, N):
fidx = speclist[idx][j[0]:j[1]]
data = loadfits(fidx[0])
wl = data[0]
newdate = _np.average( obsdates[idx][j[0]:j[1]] )
MJD = int(divmod(newdate, 1)[0])
MJDfrac = int(round( divmod(newdate, 1)[1]*10000 ))
fluxes = _np.zeros(len(wl))
for f in fidx:
data = loadfits(f)
fluxes += _np.interp(wl, data[0], data[1])
flx = fluxes/len(fidx)
outname = 'alpEri_PUCHEROS_VIS_{0}_{1:04d}_avg.fits'.format(MJD,
MJDfrac)
writeFits( flx, wl, savename=outname, path=path, extrahead=[
['OBJECT', objname], ['Comment', 'Averaged from {0} spectra'.
format(len(fidx))], ['MJD-OBS', newdate] ] )
return
def cardelli(lbd, flux, ebv=0., Rv=3.1):
"""
Milky Way Extinction law from Cardelli et al. 1989
`lbd` must be in microns.
OUTPUT: Corrected flux.
"""
x = 1. / _np.array(lbd) # CCM x is 1/microns
a, b = _np.ndarray(x.shape, x.dtype), _np.ndarray(x.shape, x.dtype)
if any((x < 0.3) | (10 < x)):
raise ValueError('Some wavelengths outside CCM 89 extinction curve ' +
'range')
irs = (0.3 <= x) & (x <= 1.1)
opts = (1.1 <= x) & (x <= 3.3)
nuv1s = (3.3 <= x) & (x <= 5.9)
nuv2s = (5.9 <= x) & (x <= 8)
fuvs = (8 <= x) & (x <= 10)
# CCM Infrared
a[irs] = .574 * x[irs]**1.61
b[irs] = -0.527 * x[irs]**1.61
# CCM NIR/optical
a[opts] = _np.polyval((.32999, -.7753, .01979, .72085, -.02427, -.50447,
.17699, 1), x[opts] - 1.82)
b[opts] = _np.polyval((-2.09002, 5.3026, -.62251, -5.38434, 1.07233,
2.28305, 1.41338, 0), x[opts] - 1.82)
# CCM NUV
a[nuv1s] = 1.752 - .316 * x[nuv1s] - 0.104 / ((x[nuv1s] - 4.67)**2 + .341)
b[nuv1s] = -3.09 + 1.825 * x[nuv1s] + 1.206 / ((x[nuv1s] - 4.62)**2 + .263)
y = x[nuv2s] - 5.9
Fa = -.04473 * y**2 - .009779 * y**3
Fb = -.2130 * y**2 - .1207 * y**3
a[nuv2s] = 1.752 - .316 * x[nuv2s] - 0.104 / \
((x[nuv2s] - 4.67)**2 + .341) + Fa
b[nuv2s] = -3.09 + 1.825 * x[nuv2s] + \
1.206 / ((x[nuv2s] - 4.62)**2 + .263) + Fb
# CCM FUV
a[fuvs] = _np.polyval((-.070, .137, -.628, -1.073), x[fuvs] - 8)
b[fuvs] = _np.polyval((.374, -.42, 4.257, 13.67), x[fuvs] - 8)
AlbAv = a + b / Rv
return flux * 10**(-AlbAv * Rv * ebv / 2.5)
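# Illustrative usage sketch (assumption): applying the Cardelli et al. (1989)
# law to a flat spectrum. `lbd` must be in microns; E(B-V) = 0.1 mag and
# Rv = 3.1 are arbitrary example values.
def _example_cardelli():
    lbd = _np.linspace(0.4, 2.0, 100)  # microns (optical to near-IR)
    flux = _np.ones(len(lbd))
    return cardelli(lbd, flux, ebv=0.1, Rv=3.1)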
def fitzpatrick(wave, flux, ebv, Rv=3.1, LMC2=False, AVGLMC=False):
"""
Deredden a flux vector using the Fitzpatrick (1999) parameterization
Parameters
----------
wave : array
Wavelength in Angstrom
flux : array
Calibrated flux vector, same number of elements as wave.
    ebv : float
      Color excess E(B-V). If a positive ebv is supplied,
      then fluxes will be dereddened rather than reddened.
    Rv : float, optional
      Ratio of total to selective extinction. The default is 3.1.
AVGLMC : boolean
If True, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by
Misselt et al. (1999, ApJ, 515, 128). The default is
False.
LMC2 : boolean
If True, the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither `AVGLMC` nor `LMC2` will alter the default value
of Rv, which is poorly known for the LMC.
Returns
-------
new_flux : array
Dereddened flux vector, same units and number of elements
as input flux.
Notes
-----
.. note::
This function was ported from the IDL Astronomy User's Library.
The following five parameters allow the user to customize
the adopted extinction curve. For example, see Clayton et al. (2003,
ApJ, 588, 871) for examples of these parameters in different
interstellar environments.
x0 - Centroid of 2200 A bump in microns (default = 4.596)
gamma - Width of 2200 A bump in microns (default =0.99)
c3 - Strength of the 2200 A bump (default = 3.23)
c4 - FUV curvature (default = 0.41)
c2 - Slope of the linear UV extinction component
(default = -0.824 + 4.717/R)
c1 - Intercept of the linear UV extinction component
             (default = 2.030 - 3.007*c2)
"""
# x = 10000./ wave # Convert to inverse microns
x = 1. / wave # microns
curve = x * 0.
# Set some standard values:
x0 = 4.596
gamma = 0.99
c3 = 3.23
c4 = 0.41
c2 = -0.824 + 4.717 / Rv
c1 = 2.030 - 3.007 * c2
if LMC2:
x0 = 4.626
gamma = 1.05
c4 = 0.42
c3 = 1.92
c2 = 1.31
c1 = -2.16
elif AVGLMC:
x0 = 4.596
gamma = 0.91
c4 = 0.64
c3 = 2.73
c2 = 1.11
c1 = -1.28
# Compute UV portion of A(lambda)/E(B-V) curve using FM fitting function
# and R-dependent coefficients
xcutuv = _np.array([10000.0 / 2700.0])
xspluv = 10000.0 / _np.array([2700.0, 2600.0])
iuv = _np.where(x >= xcutuv)[0]
N_UV = len(iuv)
iopir = _np.where(x < xcutuv)[0]
Nopir = len(iopir)
if (N_UV > 0):
xuv = _np.concatenate((xspluv, x[iuv]))
else:
xuv = xspluv
yuv = c1 + c2 * xuv
yuv = yuv + c3 * xuv**2 / ((xuv**2 - x0**2)**2 + (xuv * gamma)**2)
yuv = yuv + c4 * (0.5392 * (_np.maximum(xuv, 5.9) - 5.9)**2 + 0.05644 * (
_np.maximum(xuv, 5.9) - 5.9)**3)
yuv = yuv + Rv
yspluv = yuv[0:2] # save spline points
if (N_UV > 0):
curve[iuv] = yuv[2::] # remove spline points
# Compute optical portion of A(lambda)/E(B-V) curve
# using cubic spline anchored in UV, optical, and IR
xsplopir = _np.concatenate(([0], 10000.0 / _np.array([26500.0, 12200.0,
6000.0, 5470.0, 4670.0, 4110.0])))
ysplir = _np.array([0.0, 0.26469, 0.82925]) * Rv / 3.1
ysplop = _np.array((_np.polyval([-4.22809e-01, 1.00270, 2.13572e-04][::-1],
Rv ), _np.polyval([-5.13540e-02, 1.00216, -7.35778e-05][::-1], Rv ),
_np.polyval([ 7.00127e-01, 1.00184, -3.32598e-05][::-1], Rv ),
_np.polyval([ 1.19456, 1.01707, -5.46959e-03, 7.97809e-04,
-4.45636e-05][::-1], Rv ) ))
ysplopir = _np.concatenate((ysplir, ysplop))
if (Nopir > 0):
tck = _interpolate.splrep(_np.concatenate((xsplopir, xspluv)),
_np.concatenate((ysplopir, yspluv)), s=0)
curve[iopir] = _interpolate.splev(x[iopir], tck)
# Now apply extinction correction to input flux vector
curve *= -ebv
return flux * 10.**(0.4 * curve)
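# Illustrative usage sketch (assumption): applying the Fitzpatrick (1999)
# parameterization to a flat spectrum. Note that the function body builds
# x = 1./wave, so `wave` is effectively treated as microns here; the values
# below follow that assumption.
def _example_fitzpatrick():
    wave = _np.linspace(0.4, 2.0, 200)  # treated as microns by x = 1./wave
    flux = _np.ones(len(wave))
    return fitzpatrick(wave, flux, ebv=0.1, Rv=3.1)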
def sort_specs(specs, path=None):
""" Specs in an (N,2) array, where specs[:,0] are the files paths and
specs[:,1] the instrument name.
Return ordered_specs"""
if path is not None:
if path[-1] != '/':
path += '/'
else:
path = ''
nsp = _np.shape(specs)[0]
MJDs = _np.zeros(nsp)
specs = _np.array(specs)
lims = [_np.inf, -_np.inf]
for i in range(nsp):
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(path +
specs[i][0])
MJDs[i] = MJD
if MJDs[i] < lims[0]:
lims[0] = MJDs[i]
if MJDs[i] > lims[1]:
lims[1] = MJDs[i]
return specs[MJDs.argsort()], lims
def convgaussFunc(wl, flx, lbc, hwidth=1000., convgauss=0., frac=0., ssize=.05,
wlout=False):
""" Do a Gaussian convolution of a given Line Profile with a Gaussian.
`wl`, `flx`, `lbc`: wavelenght and flux of the spectrum containing the
line, and its value.
`hwidth`, `ssize`: width to be keeped around the line (km/s), and the
region (in percentage) where the continuum level will be evaluted around
the selected region.
`convgauss`: if bigger then 0., do the convolution. Its values is the sigma
of the gaussian conv. profile (in km/s).
`frac`: controls the intensity of the convolution. `frac`=0 means pure
profile output and `frac`=1 a pure gaussian output with the same EW value.
`wlout`: returns a wavelength array instead of a velocity array (standar)
OUTPUT: vel/wl, flux (arrays)
"""
(x, yo) = lineProf(wl, flx, lbc=lbc, hwidth=hwidth + 3 * convgauss,
ssize=ssize)
y1 = yo
y2 = 0.
if convgauss > 0 and frac > 0:
step = _np.abs(_np.min([x[j + 1] - x[j] for j in range(len(x) - 1)]))
xn = _np.arange(-hwidth - 3 * convgauss,
hwidth + 3 * convgauss + step, step)
cf = _phc.normgauss(convgauss, x=xn)
yo = _np.interp(xn, x, yo)
x = xn
y1 = yo * (1 - frac)
y2 = _np.convolve(yo * frac, cf / _np.trapz(cf), 'same')
if wlout:
x = (x / _phc.c.cgs * 1e5 + 1) * lbc
return x, y1 + y2
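# Illustrative usage sketch (assumption): smoothing a synthetic Halpha profile
# with a 50 km/s Gaussian kernel while keeping half of the original profile
# (frac=0.5). The line parameters are arbitrary demonstration values.
def _example_convgaussFunc():
    lbc = 6562.8  # Angstrom
    wl = _np.linspace(6520., 6600., 2001)
    vel = (wl - lbc) / lbc * 2.99792458e5  # km/s
    flx = 1. + 0.6 * _np.exp(-vel**2 / (2. * 80.**2))
    return convgaussFunc(wl, flx, lbc, hwidth=1000., convgauss=50., frac=0.5)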
# def gaussfold(wl, flx, sig, lbc, hwidth=1000., ssize=.05):
# """Translation from gaussfold.pro"""
# (x, yo) = lineProf(wl, flx, lbc=lbc, hwidth=hwidth+3*sig, ssize=ssize)
# x = (x / _phc.c.cgs * 1e5 + 1) * lbc
# lammax = _np.max(x)
# lammin = _np.min(x)
# dlambda = sig / 17.
# interlam = lammin + dlambda * _np.arange( (lammax-lammin)/dlambda+1 )
# interflux = _np.interp( interlam, wl, flx )
# fwhm_pix = sig / dlambda
# window = fwhm_pix(17*fwhm_pix).astype(int)
# gauss = _phc.psf_gaussian(window, sig=fwhm_pix, norm=True, ndim=1)
# fold = _phc.convol
# fluxfold = _np.interp( lam, interlam, fold )
# _warn('# Function not implemented!!')
# return None
def cutpastrefspec(ivl, iflx, irefvl, ireflx, hwidth, ssize=.05):
""" Cut and paste a given line profile into a reference line profile.
Both profiles (with any resolution) must be normalized and given in vel.
It was designed to solve the problem of Achernar's Halpha line wings
problem and it works like this: given a reference profile (`refvl`,
`reflx`), the selected profile will be cutted at the `hwidth` position
and them pasted in the corresponding position (and intensity level) of
the reference spectrum.
OUTPUT: refvl, reflx
"""
flx = _np.interp(irefvl, ivl, iflx)
i0 = _np.abs(irefvl + hwidth).argmin()
i1 = _np.abs(irefvl - hwidth).argmin()
ssize = int(ssize * len(flx))
if ssize == 0:
ssize = 1
    refav = _np.average( ireflx[i0 - ssize // 2:i0 + ssize // 2 + 1] ) / 2. + \
        _np.average( ireflx[i1 - ssize // 2:i1 + ssize // 2 + 1] ) / 2.
    av = _np.average( flx[i0 - ssize // 2:i0 + ssize // 2 + 1] ) / 2. + \
        _np.average( flx[i1 - ssize // 2:i1 + ssize // 2 + 1] ) / 2.
flx += refav - av
reflx = _np.array(ireflx).copy()
reflx[i0:i1 + 1] = flx[i0:i1 + 1]
return irefvl, reflx
def load_specs_fits(speclist, ref, lbc, lncore=None, hwidth=None,
gaussfit=False, plotcut=0):
""" Load a list of specs and do the *line core cut & paste*
`lncore`: cut and paste hwidth of the line center. It can be None, and
must be < hwidth. If hwidth is None, it is assumed to be 1000 km/s.
`speclist` : ['path+file.fits', ...]
`ref`: reference spectra to do the cut & paste
    `plotcut`: if plotcut > 0, save the cut spectra in steps of this
    variable.
OUTPUT: dtb_obs
"""
if hwidth is None:
hwidth = 1000.
    # do core cut?
    if lncore is None:
        docore = False
    else:
        docore = lncore < hwidth
# load ref
refwl, reflx = loadfits(ref[0])[0:2]
refvl, reflx = lineProf(refwl, reflx, lbc=lbc)
# load specs
dtb_obs = Spec(lbc=lbc, hwidth=hwidth, gaussfit=gaussfit)
for i in range(_np.shape(speclist)[0]):
print(speclist[i])
dtb_obs.loadspec(speclist[i])
vl, flx = lineProf(dtb_obs.wl, dtb_obs.flux, lbc=lbc)
if docore:
cuted = cutpastrefspec(vl, flx, refvl, reflx, lncore)
dtb_obs.flux = cuted[1]
dtb_obs.wl = (cuted[0]/_phc.c.cgs*1e5+1)*lbc
(dtb_obs.EW, dtb_obs.EC, dtb_obs.VR, dtb_obs.peaksep,
dtb_obs.depthcent, dtb_obs.F0) = analline(dtb_obs.wl,
dtb_obs.flux, dtb_obs.lbc, hwidth=lncore, verb=False,
gaussfit=dtb_obs.gaussfit)
else:
(dtb_obs.EW, dtb_obs.EC, dtb_obs.VR, dtb_obs.peaksep,
dtb_obs.depthcent, dtb_obs.F0) = analline(dtb_obs.wl,
dtb_obs.flux, dtb_obs.lbc, hwidth=hwidth, verb=False,
gaussfit=dtb_obs.gaussfit)
dtb_obs.addspec()
# complementary plot
if plotcut > 0 and docore:
fig0, ax = _plt.subplots()
for i in range(_np.shape(speclist)[0]):
dtb_obs.loadspec(speclist[i])
vl, flx = lineProf(dtb_obs.wl, dtb_obs.flux, lbc=lbc)
cuted = cutpastrefspec(vl, flx, refvl, reflx, lncore)
if i % plotcut == 0:
ax.plot(cuted[0], cuted[1])
_phc.savefig(fig0)
return dtb_obs
def plot_spec_info(speclist, dtb_obs, mAEW=False, mgray=None):
""" Standard plot of the Spec class (EW, E/C, V/R, peak-sep., FWHM, F0)
OUTPUT: figure (fig pyplot)
"""
if mAEW:
dtb_obs.data[:, 1] *= 1000*dtb_obs.lbc/_phc.c.cgs*1e5
# Legend, Markers and colors idx...
instm = list(_np.unique(speclist[:, 1]))
# coridx = [ phc.cycles(instm.index(i)) for i in speclist[:, 1]]
cores = _phc.gradColor(range(len(instm)), cmapn='inferno')
coridx = [ cores[instm.index(i)] for i in speclist[:, 1] ]
coridx = _np.array(coridx)
mkidx = [ _phc.cycles(instm.index(i), 'mk') for i in speclist[:, 1]]
mkidx = _np.array(mkidx)
# Plots
fig = _plt.figure()
lins, cols = (7, 1)
gssteps = [slice(0, 2), 2, 3, 4, 5, 6]
gs = _gridspec.GridSpec(lins, cols)
axs = [_plt.subplot(gs[g, :]) for g in gssteps]
# EW
axs[0].invert_yaxis()
axs[-1].set_xlabel('Julian date - 2400000.5')
ylabels = [u'EW (m\u00c5)', 'E/C', 'V/R', ('pk. sep.'+'\n'+'(km/s)'),
'FWHM'+'\n'+'(km/s)', r'F${\lambda 0}$']
for i, ax in enumerate(axs):
# binned
x, y = _phc.bindata(dtb_obs.data[:, 0], dtb_obs.data[:, i+1])
# yi = _savgol(y, 3, 1)
ax.plot(x, y, color='gray', zorder=0)
# points
for uniquem in set(mkidx):
idx = _np.where(mkidx == uniquem)
ax.plot(dtb_obs.data[:, 0][idx], dtb_obs.data[:, i+1][idx],
color=coridx[idx][0], marker=uniquem, ls='')
ax.set_ylabel(ylabels[i])
#
xlim = axs[0].get_xlim()
axs[2].plot(xlim, [1, 1], ls=":", color='k', zorder=1)
for i in range(1, len(axs)):
# ax.locator_params(axis='y', nbins=4)
axs[i].yaxis.set_major_locator(_MaxNLocator(nbins=4, prune='upper'))
if i in [1, 2, 3]:
axs[i].get_yticklabels()[-1].set_visible(False)
for ax in axs[:-1]:
ax.set_xticklabels([])
# Legend
for i in range(len(instm)):
# axs[0].plot([np.NaN], [np.NaN], label=instm[i], color=phc.cycles(i),
# marker=phc.cycles(i, 'mk'), ls='')
axs[0].plot([_np.NaN], [_np.NaN], label=instm[i], color=cores[i],
marker=_phc.cycles(i, 'mk'), ls='')
axs[0].legend(loc='best', fancybox=True, framealpha=0.5, fontsize=8,
labelspacing=0.05, ncol=2)
# bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.
fig.subplots_adjust(hspace=0.01)
# Gray
for ax in axs:
ax.set_xlim(xlim)
if mgray is not None:
ylim = ax.get_ylim()
rect = _mpatches.Rectangle([mgray[0], ylim[0]],
mgray[1]-mgray[0], ylim[1]-ylim[0], ec="gray", fc='gray',
alpha=0.5, zorder=1)
ax.add_patch(rect)
if len(mgray) == 4:
if mgray is not None:
ylim = ax.get_ylim()
rect = _mpatches.Rectangle([mgray[2], ylim[0]],
mgray[3]-mgray[2], ylim[1]-ylim[0], ec="gray", fc='gray',
alpha=0.5, zorder=1, hatch='//')
ax.add_patch(rect)
return fig
# TODO: Check if obsolete
def normalize_range(lb, spec, a, b):
"""This function is obsolete and must be removed.
Still here for compatibility issues.
"""
a2 = (spec[b] - spec[a]) / (lb[b] - lb[a])
a1 = spec[a] - a2 * lb[a]
return spec / (a1 + a2 * lb)
def normalize_spec(lb, flx, q=2, diff=0.03, perc=0, nlbp=50):
""" Normalize a spectrum using the non-parametric regression algorithm of
Local Polynomial Kernel (order=``q``).
    If perc > 0, a "percentile filter" is applied to the spectrum (divided in
    nlbp bins).
INPUT: lb, flx
OUTPUT: norm_flx
"""
def linear_model(x, *coef):
result = 0
for i in range(len(coef)):
result += coef[i]*x**i
return result
if perc <= 0:
Initial_guess = [0.,0.]
coef1, cov1 = _curve_fit(linear_model, lb, flx, Initial_guess)
idx0 = _np.where(flx != 0)
ilb = lb[idx0]
iflx = flx[idx0]
idxi = _np.where(_np.abs(linear_model(ilb, *coef1)/iflx-1) < diff)
xsi = ilb[idxi]
ysi = iflx[idxi]
else:
xsi, ysi = _phc.bindata(lb, flx, nbins=nlbp, perc=perc)
xsi = xsi.reshape(-1,1)
Initial_guess = _np.zeros(q+1)
coef2, cov2 = _curve_fit(linear_model, xsi, ysi, Initial_guess)
k2 = linear_model(lb, *coef2)
return flx/k2
def renorm(vl, y):
""" Renormalize ``y`` so that the equivalent width is preserved when the
continuum is shifted to 1.
"""
ext = _np.mean([y[0], y[-1]])
a0 = _np.trapz(y, vl)
A = ((a0-_np.trapz(_np.tile(1, len(vl)), vl))/
(a0-_np.trapz(_np.tile(ext, len(vl)), vl)))
B = 1-A*ext
return A*y+B
def normEW(vl, y, area=None):
""" Normalize ``y`` curve to have a specific area. If ``area is None``,
then the normalized equivalent width is preserved.
"""
if area is None:
area = _np.trapz(linfit(vl, y), vl)
y0 = linfit(vl, y)-1
a1 = _np.trapz(y0, vl)
a0 = _np.trapz(_np.tile([1], len(vl)), vl)
f = (area-a0)/a1
return f*y0+1
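# Illustrative usage sketch (assumption): a profile whose continuum sits at
# 1.05 is renormalized so that the continuum goes to 1 while the equivalent
# width is preserved (renorm), and forced to a given area (normEW). The
# target area of 1100 is an arbitrary example value.
def _example_renorm_normEW():
    vl = _np.linspace(-500., 500., 201)
    y = 1.05 + 0.5 * _np.exp(-vl**2 / (2. * 80.**2))
    y_renorm = renorm(vl, y)
    y_fixed_area = normEW(vl, y, area=1100.)
    return y_renorm, y_fixed_area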
def checksubdirs(path, star, lbc, hwidth=1000, showleg=True, plots=False):
"""
Faz o que tem que fazer.
"""
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
fig = _plt.figure()
ax = fig.add_subplot(111)
spdtb = Spec()
spdtb.lbc = lbc
    spdtb.hwidth = hwidth
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
for cal in scal:
spdtb.loadspec(cal)
spdtb.addspec()
if not _np.isnan(spdtb.EW):
if plots:
spdtb.plotspec()
vels = (spdtb.wl - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= hwidth)
flux = linfit(vels[idx], spdtb.flux[idx])
vels = vels[idx]
leg = spdtb.MJD
ax.plot(vels, flux, label=leg, alpha=0.7,
color=_phc.colors[_np.mod(spdtb.count,
len(_phc.colors))])
else:
print('# Data not reduced for %s at %s!' % (star, night))
ax.set_xlim([-hwidth, hwidth])
ax.set_ylim([-1, 5])
if showleg:
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.savefig('{0}/{1}_at_{2}.png'.format(_outfold, star, lbc))
_plt.close()
spdtb.savedata(datafile='{0}/{1}.txt'.format(_outfold, star),
metafile='{0}/meta_{1}.txt'.format(_outfold, star))
return
def VREWcalc(vels, flux, vw=1000):
"""
    Assumes the flux is already normalized and that the vectors are ordered.
    Computes the EW for each side (blue/red) of the line, adjusting for the
    rest velocity (TBD).
"""
    # compute and apply the rest-velocity correction
vc = 0.
vels += vc
    # cut at vw, and check the size
if len(vels) < 5:
vw = 0
if vw > 0:
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
else:
ew0 = 0.
ew1 = 0.
return ew0, ew1, vc
#
ivc = _np.abs(outvels - 0).argmin()
ew0 = 0.
for i in range(0, ivc):
dl = outvels[i + 1] - outvels[i]
ew0 += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
ew1 = 0.
for i in range(ivc, len(outvels) - 1):
dl = outvels[i + 1] - outvels[i]
ew1 += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
return ew0, ew1, vc
def normcontinuum_std(flux, ssize=.05):
"""
Assumes that the `flux` vector is normalized.
`ssize` is the percentage of the flux vector to be sampled as continuum
(0-1.); default=0.05.
It returns the standard deviation of the normalized continuum (around 1.0).
"""
# averaging borders
ny = _np.array(flux)[:]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(ny))
if ssize == 0:
ssize = 1
if ssize > 1:
continuum = _np.concatenate( (ny[:ssize], ny[-ssize:]) )
if _np.abs(1-_np.average(continuum)) < 0.05:
return _stt.mad( continuum )
# Whole averaging
mp = ssize*100
pp = ssize*100
p50 = _pos(ny, 1.)
if p50 > 100-pp:
        _warn.warn('The continuum of this spec is too low! <1: '
            'Is it normalized?')
pp = 100-p50
elif p50 < mp:
        _warn.warn('The continuum of this spec is too high! >1: '
            'Is it normalized?')
mp = p50
p45 = _np.percentile(ny, p50-mp)
p55 = _np.percentile(ny, p50+pp)
continuum = ny[_np.where((ny > p45) & (ny < p55))]
return _stt.mad(continuum)
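# Illustrative usage sketch (assumption): continuum noise estimate of a
# normalized synthetic spectrum with Gaussian noise of sigma = 0.01 and a
# fake emission feature in the middle; the returned MAD tracks the noise.
def _example_normcontinuum_std():
    rng = _np.random.RandomState(0)
    flux = 1. + rng.normal(0., 0.01, 2000)
    flux[900:1100] += 0.5  # fake emission line
    return normcontinuum_std(flux, ssize=.05)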
def plotSpecData(dtb, limits=None, civcfg=[1, 'm', 2013, 1, 1],
fmt=['png'], ident=None, lims=None, setylim=False, addsuf=''):
""" Plot spec class database `vs` MJD e civil date
Plot originally done to London, Canada, 2014.
INPUT: civcfg = [step, 'd'/'m'/'y', starting year, month, day]
`lims` sequence: 'EW', 'E/C', 'V/R', 'Pk. sep. (km/s)', 'E-F0', 'F0'
`lims` = [[-2,4+2,2],[1.,1.4+.1,0.1],[.6,1.4+.2,.2],[0,400+100,100],
[.30,.45+.05,.05],[0.6,1.20+.2,.2]]
If `lims` is defined, `setylim` can be set to True.
OUTPUT: Written image."""
if isinstance(dtb, _strtypes):
print('# Loading dtb {0}'.format(dtb))
dtb = _np.loadtxt(dtb)
if ident is not None:
idref = _np.unique(ident)
ylabels = ['EW', 'E/C', 'V/R', 'Pk. sep. (km/s)', 'E-F0', 'F0']
fig, ax = _plt.subplots(6, 1, sharex=True, figsize=(9.6, 8))
icolor = 'blue'
for i in range(1, len(ylabels) + 1):
ax[i - 1].plot(*_phc.bindata(dtb[:, 0], dtb[:, i], 20))
for j in range(len(dtb[:, 0])):
if ident is not None:
idx = _np.where(ident[j] == idref)[0]
icolor = _phc.colors[idx]
ax[i - 1].plot(dtb[j, 0], dtb[j, i], 'o', color=icolor)
ax[i - 1].set_ylabel(ylabels[i - 1])
if lims is not None:
if lims[i - 1][-1] != 0:
ax[i - 1].set_yticks(_np.arange(*lims[i - 1]))
if setylim:
ax[i - 1].set_ylim([ lims[i - 1][0], lims[i - 1][1] ])
if ident is not None:
for id in idref:
idx = _np.where(id == idref)[0]
icolor = _phc.colors[idx]
ax[0].plot([], [], 'o', color=icolor, label=id)
ax[0].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
prop={'size': 6})
if limits is None:
# limits = ax[0].get_xlim()
limits = [dtb[0, 0], dtb[-1, 0]]
else:
ax[0].set_xlim(limits)
mjd0, mjd1 = limits
ax[5].set_xlabel('MJD')
ticks = _phc.gentkdates(mjd0, mjd1, civcfg[0], civcfg[1],
dtstart=_dt.datetime(civcfg[2], civcfg[3], civcfg[4]).date())
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
ticks]
# ticks = [dt.datetime(*jdcal.jd2gcal(jdcal.MJD_0, date)[:3]).date() for \
# date in ax[0].get_xticks()]
# mjdticks = ax[0].get_xticks()
for i in range(1, 6 + 1):
ax2 = ax[i - 1].twiny()
ax2.set_xlim(limits)
ax2.set_xticks(mjdticks)
ax2.set_xticklabels(['' for date in ticks])
if i == 1:
ax2.set_xlabel('Civil date')
ax2.set_xticklabels([date.strftime("%d %b %y") for date in ticks])
_plt.setp( ax2.xaxis.get_majorticklabels(), rotation=45 )
_plt.subplots_adjust(left=0.13, right=0.8, top=0.88, bottom=0.06,
hspace=.15)
for f in fmt:
print ('SpecQ{1}.{0}'.format(f, addsuf))
_plt.savefig('SpecQ{1}.{0}'.format(f, addsuf), transparent=True)
_plt.close()
return
def din_spec(metadata, lbc=6562.86, hwidth=1500., res=50, interv=None,
fmt=['png'], outname='din_spec', pxsize=8, vmin=None, vmax=None, avg=True,
cmapn='inferno', refspec=None, figsize=None):
""" Plot dynamical specs. from metadata table of the Spec class.
`interv` controls the interval between specs (in days).
`res` is the resolution in km/s.
    By default (`avg`=True), the average of the spectra in each bin is shown.
    If `avg`=False, the spectrum nearest to the bin center (in time) is shown.
    If `refspec` is not None, then a difference spectrum is plotted.
"""
# Define MJD and bins
dates = _np.array(metadata[:, 0], dtype=float)
t0 = _np.min(dates)
tf = _np.max(dates)
if interv is None:
interv = _np.linspace(t0, tf, 21)
else:
interv = _np.arange(t0, tf + interv, interv)
dt = interv[1] - interv[0]
# Select specs
wl0 = _np.arange(-hwidth, hwidth + res, res)
# Load refspec, if required
baselevel = 1.
if refspec is not None:
wl, flux, tmp, tmp, tmp, tmp = loadfits(refspec)
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
refflx = _np.interp(wl0, wl, flux)
baselevel = 0
fluxes = _np.zeros(( len(wl0), len(interv) )) + baselevel
for i in range(len(interv)):
# method 1
if not avg:
date = _phc.find_nearest(dates, interv[i])
if date < interv[i] + dt / 2 and date > interv[i] - dt / 2:
j = list(dates).index(date)
wl, flux, tmp, tmp, tmp, tmp = loadfits(metadata[j, 3])
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if refspec is None:
fluxes[:, i] = _np.interp(wl0, wl, flux)
else:
flux = _np.interp(wl0, wl, flux)
fluxes[:, i] = flux - refflx
# method 2
else:
k = 0
for j in range(len(dates)):
if dates[j] < interv[i] + dt / 2 and dates[j] > interv[i] - \
dt / 2:
wl, flux, tmp, tmp, tmp, tmp = loadfits(metadata[j, 3])
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
fluxes[:, i] += _np.interp(wl0, wl, flux)
k += 1
if k > 0:
# fluxes[:,i]/= k
wl = vel2wl(wl0, lbc)
tmp, fluxes[:, i] = lineProf(wl, fluxes[:, i], lbc=lbc,
hwidth=hwidth)
if refspec is not None:
fluxes[:, i] = fluxes[:, i] - refflx
if all(fluxes[:, i] == baselevel):
fluxes[:, i] = _np.NaN
# Create image
img = _np.empty((pxsize * len(interv), len(wl0)))
for i in range(len(interv)):
img[i * pxsize:(i + 1) * pxsize] = _np.tile(fluxes[:, i], pxsize).\
reshape(pxsize, len(wl0))
# Save image
if figsize is None:
fig, ax = _plt.subplots(figsize=(len(wl0) / 16, pxsize *
len(interv) / 16), dpi=80)
else:
fig, ax = _plt.subplots(figsize=figsize)
# _plt.figure(figsize=(len(wl0) / 16, pxsize * len(interv) / 16), dpi=80)
# print _np.min(img), _np.max(img)
cmapn = _plt.get_cmap(cmapn)
cmapn.set_bad('k', 1.)
ax.imshow(img, vmin=vmin, vmax=vmax, cmap=cmapn, origin='lower')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
# ax.set_xlim([-hwidth, hwidth])
ax.set_yticks(_np.linspace(pxsize*len(interv)*.1, pxsize*len(interv)*.9,
8))
ax.set_yticklabels([int(round((tf-t0)*t/(pxsize*len(interv))+t0))
for t in ax.get_yticks()], rotation='vertical')
ax.set_xticklabels([int(round(t*2.*hwidth/(len(wl0)-1)-hwidth)) for
t in ax.get_xticks()]) # , rotation='vertical')
# fig.tight_layout()
ax.xaxis.set_tick_params(color='gray', width=1.1)
ax.yaxis.set_tick_params(color='gray', width=1.1)
fig.gca().invert_yaxis()
_phc.savefig(fig, fmt=fmt, figname=outname)
return
def plot_line_str(fig, ax, lbc='', ylabel='', fs=14, xlim=None, dlim=None,
cmapn='gnuplot', lfs=10, ylim=None):
""" Line plotting structure """
if lbc != '':
ax.set_title(r'$\lambda_c$ = {0:.1f} $\AA$'.format(lbc), size=fs)
if ylabel != '':
ax.set_ylabel(ylabel, size=fs)
if xlim is not None:
ax.xlims = ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
ax.set_xlabel(r'Velocity (km s$^{-1}$)', size=fs)
# reverse to keep order consistent
ax.legend()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='upper right', labelspacing=0.1,
fancybox=True, framealpha=0.5, fontsize=lfs) # loc=(1.05, .01)
rect = _mpatches.Rectangle([0.835, 0.01], 0.15, 0.44, ec="black",
fc='white', transform=ax.transAxes, zorder=10, alpha=0.5)
ax.add_patch(rect)
ax3 = fig.add_axes([0.82, 0.12, 0.025, 0.35])
# ax3.set_axis_bgcolor('white')
cmap = _plt.get_cmap(cmapn)
norm = _mpl.colors.Normalize(vmin=dlim[0], vmax=dlim[1])
cb = _mpl.colorbar.ColorbarBase(ax3, cmap=cmap, norm=norm,
orientation='vertical')
cb.set_label('MJD', size=fs)
fig.subplots_adjust(left=0.1, right=0.95, top=0.94, bottom=0.1)
# , hspace=0.3, wspace=.3)
return fig, ax
def spec_time(speclist, lbc=6562.8, ref_spec=("/data/Dropbox/work/"
"sci_16-15aeri/alpEri_FEROS_2000AVE.mt"), mod_lbc=.656461, MJDref=None,
mod_ref=("/data/Dropbox/work/sci_16-15aeri/"
"fullsed_mod03_VDDn0_1p4e12_Be_aeri2014.sed2"),
fmt=['png', 'pdf'], outname=None, cmapn='inferno', hwidth=1000.,
outpath='', figsize=(5, 7), ysh=0.01):
""" Plot specs over time as suggested by Rivi.
    ``speclist`` is an array of strings with the paths of the `*.fits` files.
    ``ref_spec`` is a reference `*.fits` and ``mod_ref`` an hdust reference
    model. They are ignored if the path is not found.
    ``ysh`` controls the vertical separation of the profiles.
"""
if outname is None or outname == "":
outname = _phc.dtflag()
MJDs = [_np.inf, 0]
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
if MJD < MJDs[0]:
MJDs[0] = MJD
if MJD > MJDs[1]:
MJDs[1] = MJD
if MJDref is None:
MJDref = MJDs[0]
elif MJDs[0] > MJDref:
MJDs[0] = MJDref
# Plot
extrem = [_np.inf, 0]
fig, ax = _plt.subplots()
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
if len(flux) == 0:
raise NameError('Wrong lbc in spt.spe')
if cmapn is not None:
cor = _phc.gradColor([MJD], min=MJDs[0], max=(MJDs[1]+
0.1*(MJDs[1]-MJDs[0])), cmapn=cmapn)[0]
else:
cor = 'k'
# print(MJD, MJDs, extrem, ysh, (MJD-MJDs[0])*ysh, flux, sp)
ax.plot(vel, flux+(MJD-MJDs[0])*ysh, color=cor)
if _np.max(flux+(MJD-MJDs[0])*ysh) > extrem[1]:
extrem[1] = _np.max(flux+(MJD-MJDs[0])*ysh)
if _np.min(flux+(MJD-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJD-MJDs[0])*ysh)
# print(extrem)
if _os.path.exists(ref_spec):
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(ref_spec)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
# ax.text(650., 0.8, 'Reference', horizontalalignment='center',
ax.text(800., 0.8, 'Reference', horizontalalignment='center',
verticalalignment='center') # , transform=ax.transAxes)
ax.plot(vel, flux+(MJDref-MJDs[0])*ysh, color='k', ls=':')
# print(MJDref, MJDs, ysh, extrem, _np.min(flux), _np.max(flux))
if _np.min(flux+(MJDref-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJDref-MJDs[0])*ysh)
ax.plot(vel+5, flux+(57655-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(57655-MJDs[0])*ysh, 'Reference',
horizontalalignment='center', verticalalignment='center')
print('A!')
if _np.max(flux+(57655-MJDs[0])*ysh) > extrem[1]:
print('B!')
extrem[1] = _np.max(flux+(57655-MJDs[0])*ysh)
if _os.path.exists(mod_ref):
s2d = _hdt.readfullsed2(mod_ref)
vel, flux = lineProf(s2d[4, :, 2], s2d[4, :, 3], mod_lbc,
hwidth=hwidth)
ax.plot(vel, flux+(56910-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(56910-MJDs[0])*ysh, 'model',
horizontalalignment='center', verticalalignment='center')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
ax.set_ylim(extrem)
ax.set_xlim([-hwidth, hwidth])
# ax.set_yticks(_np.arange(56300, 57000+100, 100))
yref = [1., 1+_np.diff(MJDs)*ysh]
# yMJDs = _np.arange(56300, 57100, 100)
yMJDs = _np.arange(MJDs[0], MJDs[1], 100)
ax.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax.set_yticklabels(yMJDs, rotation='vertical')
fig.set_size_inches(figsize)
fig.subplots_adjust(left=0.1, right=0.94, top=0.99, bottom=0.04)
ax.minorticks_on()
ax3 = ax.twinx()
ax3.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax3.set_yticklabels([])
ax3.minorticks_on()
ax2 = ax.twinx()
ax2.spines['right'].set_position(('axes', 1.05))
ax2.set_ylabel('Civil date')
# dtminticks = _phc.gentkdates(56201., 57023., 1, 'm')
dtminticks = _phc.gentkdates(MJDs[0], MJDs[1], 1, 'm')
i = 1
# dtticks = _phc.gentkdates(56201., 57023., 3, 'm')
dtticks = _phc.gentkdates(MJDs[0], MJDs[1], 3, 'm')
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
dtticks]
while dtticks[0] not in dtminticks:
dtminticks = _phc.gentkdates(yMJDs[0]+i, yMJDs[-1], 1, 'm')
i += 1
minjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date
in dtminticks]
ax2.set_yticks(list(_phc.renormvals(mjdticks, MJDs, yref)))
ax2.set_yticks(list(_phc.renormvals(minjdticks, MJDs, yref)), minor=True)
xlabs = [date.strftime('%Y-%m-%d') for date in dtticks]
# xlabs[1::2] = ['']*len(xlabs[1::2])
ax2.set_yticklabels(xlabs, rotation='vertical')
ax2.set_ylim(extrem)
ax3.set_ylim(extrem)
ax.xaxis.set_tick_params(length=8, width=1.5)
ax.xaxis.set_tick_params(length=6, which='minor')
ax.yaxis.set_tick_params(length=4, which='minor')
ax.yaxis.set_tick_params(length=8, width=1.5)
ax2.yaxis.set_tick_params(length=4, which='minor')
ax2.yaxis.set_tick_params(length=8, width=1.5)
ax3.yaxis.set_tick_params(length=4, which='minor')
ax3.yaxis.set_tick_params(length=8, width=1.5)
# , fontsize=10)
_phc.savefig(fig, figname=outpath+outname, fmt=fmt)
return
def spec_time_Achernar(speclist, lbc=6562.8, fmt=['png', 'pdf'], outname=None,
cmapn='inferno', hwidth=1000., outpath='', figsize=(5, 15), ysh=0.01):
""" Plot specs over time as suggested by Rivi """
if outname is None or outname == "":
outname = _phc.dtflag()
MJDs = [_np.inf, 0]
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
if MJD < MJDs[0]:
MJDs[0] = MJD
if MJD > MJDs[1]:
MJDs[1] = MJD
MJDref = 56245
if MJDs[0] > MJDref:
MJDs[0] = MJDref
# Plot
extrem = [_np.inf, 0]
fig, ax = _plt.subplots()
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
if len(flux) == 0:
raise NameError('Wrong lbc in spt.spe')
if cmapn is not None:
cor = _phc.gradColor([MJD], min=MJDs[0], max=(MJDs[1]+
0.1*(MJDs[1]-MJDs[0])), cmapn=cmapn)[0]
else:
cor = 'k'
# print(MJD, MJDs, extrem, ysh, (MJD-MJDs[0])*ysh, flux, sp)
ax.plot(vel, flux+(MJD-MJDs[0])*ysh, color=cor)
if _np.max(flux+(MJD-MJDs[0])*ysh) > extrem[1]:
extrem[1] = _np.max(flux+(MJD-MJDs[0])*ysh)
if _np.min(flux+(MJD-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJD-MJDs[0])*ysh)
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits('/data/Dropbox/work'
'/sci_16-15aeri/alpEri_FEROS_2000AVE.mt')
vel, flux = lineProf(wl, flux, 6561.8, hwidth=hwidth)
ax.text(650., 0.8, 'photospheric ref.', horizontalalignment='center',
verticalalignment='center') # , transform=ax.transAxes)
ax.plot(vel, flux+(MJDref-MJDs[0])*ysh, color='k', ls=':')
if _np.min(flux+(MJDref-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJDref-MJDs[0])*ysh)
s2d = _hdt.readfullsed2('/data/Dropbox/work/sci_16-15aeri/'
'fullsed_mod03_VDDn0_1p4e12_Be_aeri2014.sed2')
vel, flux = lineProf(s2d[4, :, 2], s2d[4, :, 3], .656461, hwidth=hwidth)
ax.plot(vel, flux+(56910-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(56910-MJDs[0])*ysh, 'model',
horizontalalignment='center', verticalalignment='center')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
ax.set_ylim(extrem)
ax.set_xlim([-hwidth, hwidth])
# ax.set_yticks(_np.arange(56300, 57000+100, 100))
yref = [1., 1+_np.diff(MJDs)*ysh]
yMJDs = _np.arange(56300, 57100, 100)
ax.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax.set_yticklabels(yMJDs, rotation='vertical')
fig.set_size_inches(figsize)
fig.subplots_adjust(left=0.1, right=0.94, top=0.99, bottom=0.04)
ax.minorticks_on()
ax3 = ax.twinx()
ax3.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax3.set_yticklabels([])
ax3.minorticks_on()
ax2 = ax.twinx()
ax2.spines['right'].set_position(('axes', 1.05))
ax2.set_ylabel('Civil date')
dtminticks = _phc.gentkdates(56201., 57023., 1, 'm')
i = 1
dtticks = _phc.gentkdates(56201., 57023., 3, 'm')
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
dtticks]
while dtticks[0] not in dtminticks:
dtminticks = _phc.gentkdates(yMJDs[0]+i, yMJDs[-1], 1, 'm')
i += 1
minjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date
in dtminticks]
ax2.set_yticks(list(_phc.renormvals(mjdticks, MJDs, yref)))
ax2.set_yticks(list(_phc.renormvals(minjdticks, MJDs, yref)), minor=True)
xlabs = [date.strftime('%Y-%m-%d') for date in dtticks]
# xlabs[1::2] = ['']*len(xlabs[1::2])
ax2.set_yticklabels(xlabs, rotation='vertical')
ax2.set_ylim(extrem)
ax3.set_ylim(extrem)
ax.xaxis.set_tick_params(length=8, width=1.5)
ax.xaxis.set_tick_params(length=6, which='minor')
ax.yaxis.set_tick_params(length=4, which='minor')
ax.yaxis.set_tick_params(length=8, width=1.5)
ax2.yaxis.set_tick_params(length=4, which='minor')
ax2.yaxis.set_tick_params(length=8, width=1.5)
ax3.yaxis.set_tick_params(length=4, which='minor')
ax3.yaxis.set_tick_params(length=8, width=1.5)
# , fontsize=10)
_phc.savefig(fig, figname=outpath+outname, fmt=fmt)
return
def extractfromsplot(file, splot):
"""Ce = center; Co = core
#LcCe, LcCo, lcGW, lcEW, lvCe, lcCo, lvEW, lrCe, LrCo, lrEW
"""
out = _np.array(10 * [_np.NaN])
readflag = False
for line in splot:
if line.find(']:') > 0 and readflag:
readflag = False
if line.find(file) > 0:
readflag = True
if readflag:
info = line.split()
# if _re.match("^\d+?\.\d+?$", info[0]) is not None:
try:
float(info[0])
info = _np.array(info, dtype=float)
if info[0] > 6556 and info[0] < 6556 + 4.33:
if len(info) == 4:
out[6] = float(info[3])
elif len(info) == 7:
out[4] = float(info[0])
out[5] = float(info[4])
elif info[0] > 6556 + 4.33 and info[0] < 6556 + 2 * 4.33:
if len(info) == 4:
out[3] = float(info[3])
elif len(info) == 7:
out[0] = float(info[0])
out[1] = float(info[4])
out[2] = float(info[5])
elif info[0] > 6556 + 2 * 4.33 and info[0] < 6556 + 3 * 4.33:
if len(info) == 4:
out[9] = float(info[3])
elif len(info) == 7:
out[7] = float(info[0])
out[8] = float(info[4])
except:
pass
return out
def check_dtobs(dtobs):
""" Check if the dtobs fits the float format. Required for MJD calc. """
if 'T' in dtobs:
dtobs = dtobs.replace('.', '')
tobs, dtobs = dtobs.split('T')
if len(tobs) == 10:
dtobs, tobs = tobs, dtobs
tobs = tobs.split(':')
tobs = float(tobs[0]) * 3600 + float(tobs[1]) * 60 + float(tobs[2])
tobs /= (24 * 3600)
else:
tobs = 0.
if dtobs[4] == '-':
dtobs = dtobs.split('-')
elif dtobs[2] == '/':
dtobs = dtobs.split('/')[::-1]
else:
_warn.warn('Wrong "DATE-OBS" in header! {0}'.format(dtobs))
raise SystemExit(1)
dtobs = _np.array(dtobs, dtype='int32')
return dtobs, tobs
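# Illustrative usage sketch (assumption): parsing a typical FITS "DATE-OBS"
# value into the (year, month, day) array and the day fraction used for the
# MJD calculation.
def _example_check_dtobs():
    dtobs, tobs = check_dtobs('2012-11-20T23:51:37')
    # dtobs -> array([2012, 11, 20]); tobs -> ~0.994 (fraction of the day)
    return dtobs, tobs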
# TODO: Check if obsolete
def overplotsubdirs(path, star, limits=(6540, 6600), showleg=True):
"""
    Plot the spectra of the star `star` found inside the directory `path`.
    Currently, the plot is done between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and `path/star/star_specs.png`.
"""
# path = _os.getcwd()
# star = _phc.user_input('Type the star name: ')
# ref0 = 6540
# ref1 = 6600
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * \
imfits[0].header['CDELT1'] + \
imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[-1] > 6560: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
msg = '{0}, {1}, {2}'.format((0.1 * i), night, cal)
print(msg)
f0.writelines(msg + '\n')
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
_plt.plot(lbda, spec, label=leg, alpha=0.7,
color=_phc.colors[_np.mod(i,
len(_phc.colors))])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
msg = '{0}, {1}, {2}'.format('NC', night, 'None')
f0.writelines(msg + '\n')
if showleg:
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.ylim([-1, 5])
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs.png'.format(path, star))
_plt.close()
f0.close()
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def diffplotsubdirs(path, star, limits=(6540, 6600)):
"""
    Plot the spectra of the star `star` found inside the directory `path`.
    Currently, the plot is done between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and
    `path/star/star_specs_dif.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
# f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
                        print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1) + \
(0.1 * i)
# print (0.1 * i)
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
_plt.plot([ref0, ref1], [1 + 0.1 * i, 1 + 0.1 * i],
'k--', alpha=0.5)
_plt.plot(lbda, spec, label=leg,
color=_phc.colors[i])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs_dif.png'.format(path, star))
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def refplotsubdirs(path, star, limits=(6540, 6600)):
"""
    Plot the spectra of the star `star` found inside the directory `path`.
    Currently, the plot is done between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and
    `path/star/star_specs_REFERENCE.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if
_os.path.isdir('{0}/{1}'.format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
srv = scal
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
# print (0.1 * i)
leg = imfits[0].header['DATE-OBS']
refleg = '2012-11-20T23:51:37.392'
refleg = '2008-06-13'
if leg == refleg:
f0 = open('{0}/{1}/ref.txt'.format(path, star),
'w')
f0.writelines([str(x) + '\t' for x in lbda])
f0.writelines('\n')
f0.writelines([str(x) + '\t' for x in spec])
f0.writelines('\n')
f0.close()
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
f0 = open('{0}/{1}/ref.txt'.format(path, star))
lines = f0.readlines()
f0.close()
specref = _np.array(lines[1].split(), dtype=float)
lbdaref = _np.array(lines[0].split(), dtype=float)
func = _interpolate.interp1d(lbdaref, specref) # , kind='cubic')
lbdaref = _np.linspace(ref0, ref1, 5000)
specref = func(lbdaref)
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
func = _interpolate.interp1d(lbda, spec)
# , kind='cubic')
                            # There were 'out-of-bounds' problems here... one
                            # spectrum had its wavelengths out of order:
# print imfits[0].header['CDELT1'],
# imfits[0].header['CRVAL1'], cal
spec = func(lbdaref)
# print (0.1 * i)
try:
leg = imfits[0].header['DATE-OBS']
                            except KeyError:
leg = imfits[0].header['FRAME']
if i < 130:
_plt.plot(lbdaref, spec - specref, label=leg,
alpha=0.8, color=_phc.colors[i])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.title('Ref.= %s' % refleg)
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs_{2}.png'.format(path, star, refleg[:10]))
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+\
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
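# Editor's note -- a hypothetical usage sketch for refplotsubdirs (the path,
# star name and limits below are made-up examples, not values from this repo):
# >>> refplotsubdirs('/data/lna', 'aeri', limits=(6540, 6600))
# This would scan every night directory under /data/lna for targets matching
# 'aeri', write /data/lna/aeri/ref.txt for the reference date and save
# /data/lna/aeri/aeri_specs_<REFDATE>.png with the spectra differences.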
def overplotsubdirs2(path, star, limits=(6540, 6600)):
"""
Realiza o plot de espectros da estrela `star` dentre do diretorio `path`.
Atualmente, faz o plot entre os valores `limits` (Angstroms).
Ha' um criterio de escolha de espectros aqui (rudimentar).
Gera os arquivos `path/star/star.log` e `path/star/star_specs2.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
ax = _plt.figure()
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
# print (0.1 * i), night
prtcolor = _phc.colors[i]
try:
leg = imfits[0].header['DATE-OBS']
                            except KeyError:
leg = imfits[0].header['FRAME']
check = False
if leg.find('2012-11-20T23:51:37.392') != -1:
leg = '2012-11-20'
prtcolor = _phc.colors[0]
check = True
elif leg.find('22/01/2013') != -1:
leg = '2013-01-22'
check = True
# elif leg.find('03/07/2013') != -1:
# leg = '2013-07-03'
# check = True
elif leg.find('28/07/2013') != -1:
leg = '2013-07-28'
check = True
elif leg.find('2013-11-12T01:30:38.938') != -1:
leg = '2013-11-12'
check = True
else:
print(leg)
if check:
print(cal)
_plt.plot(lbda, spec, label=leg, alpha=0.7,
color=prtcolor)
i += 1
else:
msg = '# Data not reduced for %s at %s!' % (star, night)
print(msg)
f0.writelines(msg)
font = { 'size': 16, }
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.ylim([.58, 1.2])
_plt.xlabel(r'wavelength ($\AA$)', fontdict=font)
_plt.ylabel('Normalized flux', fontdict=font)
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs2.png'.format(path, star))
_plt.close()
f0.close()
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def overPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
convgauss=0., frac=0., addsuf='', labels=None, hwidth=1000., ssize=.05,
outpath='', ylim=[.7, 2.2], cmapn='gnuplot'):
"""Generate overplot spec. line from a HDUST mod list, separated by
observers.
Observers config. must be the same between models in `fullseds` list.
If `convgauss` > 0, do a gaussian convolution.
"""
if labels is None:
labels = [''] * len(fullseds)
for obs in obsers:
fig, ax = _plt.subplots()
fig2, ax2 = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
(x, yo) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc, hwidth=hwidth + 3 * convgauss, ssize=ssize)
y1 = yo
y2 = 0.
if convgauss > 0:
step = _np.min([x[j + 1] - x[j] for j in range(len(x) - 1)])
xn = _np.arange(-hwidth-3*convgauss, hwidth+3*convgauss+step,
step)
cf = _phc.normgauss(convgauss, x=xn)
yo = _np.interp(xn, x, yo)
x = xn
y1 = yo * (1 - frac)
y2 = _np.convolve(yo * frac, cf / _np.trapz(cf), 'same')
ax2.plot(x, y1, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax2.plot(x, y2, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
y = y1 + y2
# y = linfit(x, y1+y2)
if file == fullseds[0]:
ax.plot(x, y, label='{0:02.1f} deg. {1}'.format(obsdegs[k],
labels[i]), color=_phc.colors[_np.mod(i, len(_phc.colors))])
# ew0 = EWcalc(x, y, vw=hwidth)
else:
ax.plot(x, y, color=_phc.colors[_np.mod(i, len(_phc.colors))],
label=labels[i])
# ewf = EWcalc(x, y, vw=hwidth)
plot_line_str(fig, ax, lbc=lbc, ylim=ylim, cmapn=cmapn, xlim=[-hwidth,
hwidth])
figname = outpath + 'modsover_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
_phc.savefig(fig, figname, fmt)
plot_line_str(fig2, ax2, lbc=lbc, ylim=ylim, cmapn=cmapn,
xlim=[-hwidth, hwidth])
figname = outpath + 'modsover_lbc{1:.4f}_obs{0:02.1f}{2}Extra'.\
format(obsdegs[k], lbc, addsuf)
        _phc.savefig(fig2, figname, fmt)
return
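# Editor's note -- hypothetical call to overPlotLineSeries (file names,
# observer indices and labels are illustrative only):
# >>> fullseds = ['mod01/fullsed_mod01.sed2', 'mod02/fullsed_mod02.sed2']
# >>> overPlotLineSeries(fullseds, obsers=[0, 2], lbc=.6564606,
# ...                    convgauss=50., frac=.3, labels=['mod01', 'mod02'])
# One figure is produced per observer, overplotting the (optionally
# Gaussian-convolved) line profiles of all models in `fullseds`.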
def overPlotLineFits(specs, lbc=.6564606, fmt=['png'], hwidth=1500.,
ylim=None, yzero=False, addsuf='', dlim=None, cmapn='jet', xlim=None,
outpath=''):
"""Generate overplot spec. line from a FITS file list.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y, label='{0}'.format(dateobs), color=cor)
ylabel = 'Overplotted spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsover_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
def incrPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
addsuf='', outpath=''):
"""Generate incremented spec. line from a HDUST mod list, separated by
observers. The increment is 0.1 for each file in fullseds sequence.
Observers config. must be the same between models in `fullseds` list.
"""
for obs in obsers:
fig, ax = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
(x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc)
if file == fullseds[0]:
ax.plot(x, y + 0.1 * i, label='{0:02.1f} deg.'.format(
obsdegs[k]), color=_phc.colors[_np.mod(i,
len(_phc.colors))])
else:
ax.plot(x, y + 0.1 * i, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax.set_title(u'lbc = {0:.5f} $\mu$m'.format(lbc))
ax.legend(loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsincr_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
for f in fmt:
print('# Saved {1}.{0}'.format(f, figname))
fig.savefig(figname + '.{0}'.format(f), transparent=True)
_plt.close()
return
def incrPlotLineFits(specs, lbc=.6564606, fmt=['png'], hwidth=1500.,
yzero=False, addsuf='', dlim=None, cmapn='jet', xlim=None, outpath='',
ylim=None):
"""Generate incremented spec. line from FITS files list.
The increment is 0.1 for each file in sequence.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y + 0.1 * i, label='{0}'.format(dateobs), color=cor)
if yzero:
ylim = ax.get_ylim()
ax.plot([0, 0], ylim, ls='-', color='Gray')
ylabel = 'Spaced spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsincr_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
def diffPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
rvel=None, rflx=None, hwidth=1000., outpath='', addsuf=''):
"""Generate overplot of DIFFERENCE spec. line from a HDUST mod list.
The model will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first of the list.
    It is recommended to first run (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
Observers config. must be the same between models in
`fullseds` list.
"""
for obs in obsers:
fig, ax = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
(x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
                lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
refspec = _hdt.readfullsed2(fullseds[0])
(vel, flx) = lineProf(refspec[obs, :, 2], refspec[obs, :, 3],
lbc=lbc, hwidth=hwidth)
else:
flx = _np.interp(x, rvel, rflx)
if file == fullseds[0]:
ax.plot(x, y - flx, label='{0:02.1f} deg.'.format(obsdegs[k]),
color=_phc.colors[_np.mod(i, len(_phc.colors))])
else:
ax.plot(x, y - flx, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax.set_title(u'lbc = {0:.5f} $\mu$m'.format(lbc))
ax.set_ylabel('Difference spectra (spec - ref.)')
ax.legend(fontsize=8, loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsdiff_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
        for f in fmt:
            print('# Saved {1}.{0}'.format(f, figname))
            fig.savefig(figname + '.{0}'.format(f), transparent=True)
        _plt.close()
return
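# Editor's note -- the docstring above recommends preparing the reference
# profile with lineProf first; a hypothetical sketch (file names invented):
# >>> ref = _hdt.readfullsed2('mod_ref/fullsed_mod_ref.sed2')
# >>> (rvel, rflx) = lineProf(ref[0, :, 2], ref[0, :, 3], lbc=.6564606,
# ...                         hwidth=1000.)
# >>> diffPlotLineSeries(fullseds, obsers=[0], rvel=rvel, rflx=rflx)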
def diffPlotLineFits(specs, lbc=.6564606, fmt=['png'], xlim=None,
rvel=None, rflx=None, hwidth=1500., addsuf='', cmapn='jet', dlim=None,
outpath='', ylim=None):
"""Generate overplot of DIFFERENCE spec. line from a FITS files list.
The observations will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first of the list.
    It is recommended to first run (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
If `cmap` is None or empty, the phc.colors vector is read.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
# wl0, flux0, MJD, dateobs0, datereduc, fitsfile = \
# loadfits(specs[0])
# (rvel,flx) = lineProf(wl0, flux0, lbc=lbc, hwidth=hwidth)
# flx = _np.interp(x, rvel, rflx)
rvel = x
rflx = y
flx = y[:]
else:
flx = _np.interp(x, rvel, rflx)
# if spec == specs[0]:
# ax.plot(x, y-flx, label='{0}'.format(dateobs), \
# color= _phc.colors[_np.mod(i, len(_phc.colors))])
# else:
# ax.plot(x, y-flx, color= _phc.colors[_np.mod(i,
# len(_phc.colors))])
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y - flx, label='{0}'.format(dateobs), color=cor)
ylabel = 'Difference spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsdiff_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
def diffPlotLineObs(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
rvel=None, rflx=None, hwidth=1000., addsuf='', outpath=''):
"""Generate overplot of DIFFERENCE spec. line from a HDUST OBSERVERS list.
The model will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first observer of the list.
    It is recommended to first run (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
Observers config. must be the same between models in
`fullseds` list.
"""
for file in fullseds:
fig, ax = _plt.subplots()
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
for obs in obsers:
i = obsers.index(obs)
(x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
(vel, flx) = lineProf(sed2data[obsers[0], :, 2],
sed2data[obsers[0], :, 3], lbc=lbc, hwidth=hwidth)
else:
flx = _np.interp(x, rvel, rflx)
ax.plot(x, y - flx, label='{0:02.1f} deg.'.format(obsdegs[i]),
color=_phc.colors[_np.mod(i, len(_phc.colors))])
ax.set_title(u'lbc={0:.5f}$\mu$m, {1}'.format(lbc,
_phc.trimpathname(file)[1]))
ax.set_ylabel('Difference spectra (spec - ref.)')
ax.legend(fontsize=8, loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsdiff_lbc{1:.4f}{0}'.format(addsuf, lbc)
for f in fmt:
print('# Saved {1}.{0}'.format(f, figname))
fig.savefig(figname + '.{0}'.format(f), transparent=True)
_plt.close()
return
def max_func_pts(x, y, ws=0.01, avgbox=3):
""" `ws` window size where the maximum will be evaluated. Example: `ws=0.02`
corresponds to 2% of the length of the input. """
x, y = (_np.array(x), _np.array(y))
N = len(x)
parts = _phc.splitequal(N*ws, N)
n = len(parts)
xout, yout = (_np.zeros(n), _np.zeros(n))
for i in range(n):
p = parts[i]
Y = y[p[0]:p[1]]
X = x[p[0]:p[1]]
idx = _np.argsort(Y)
xout[i] = _np.average(X[idx][-avgbox:])
yout[i] = _np.average(Y[idx][-avgbox:])
return xout, yout
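# Editor's note -- minimal illustration of max_func_pts on synthetic data
# (assumes only numpy; values are arbitrary):
# >>> x = _np.linspace(0, 10, 1000)
# >>> y = _np.sin(x) + 0.05 * _np.random.randn(1000)
# >>> xm, ym = max_func_pts(x, y, ws=0.05, avgbox=3)
# Each (xm[i], ym[i]) is the average position/value of the `avgbox` largest
# samples inside the i-th window (here, 5% of the input length).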
def sum_ec(fwl, fflx):
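    """ Merge several (overlapping) spectra onto a common wavelength grid.
    `fwl` is a list of wavelength arrays and `fflx` the corresponding list of
    flux arrays. The common grid uses the finest sampling found among the
    inputs; each spectrum is linearly interpolated onto it and summed where it
    overlaps. Returns (swl, sflx).
    (Docstring added editorially, inferred from the code below.)
    """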
dmin = _np.inf
wlmin = _np.inf
wlmax = 0
for f in fwl:
if _np.min(_np.diff(f)) < dmin:
dmin = _np.min(_np.diff(f))
if _np.min(f) < wlmin:
wlmin = _np.min(f)
if _np.max(f) > wlmax:
wlmax = _np.max(f)
swl = _np.arange(wlmin, wlmax, dmin)
sflx = _np.zeros(len(swl))
for i in range(len(fwl)):
idx = _np.where( (swl > _np.min(fwl[i])) & (swl < _np.max(fwl[i])) )
sflx[idx] += _np.interp(swl[idx], fwl[i], fflx[i])
return swl, sflx
def lbdc2range(lbdc):
""" Function doc
"""
dl = lbdc[1] - lbdc[0]
return _np.linspace(lbdc[0] - dl / 2, lbdc[-1] + dl / 2, len(lbdc) + 1)
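# Editor's note -- tiny example of lbdc2range (values arbitrary):
# >>> lbdc2range(_np.array([1.0, 2.0, 3.0]))
# array([0.5, 1.5, 2.5, 3.5])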
def classify_specs(list_of_specs, starid, instrument, calib, comment=''):
"""Do useful things with a list of FITS specs to classify them.
It will (1) generate figures of the specs, with line info; (2) classify
the band of observation; (3) copy the file with a standard name.
"""
lines = [6562.79, 4861.35, 4340.472, 4101.734, 21655.2488]
lnames = ['Ha', 'Hb', 'Hc', 'Hd', "Brg"]
list_of_specs = list(list_of_specs)
list_of_specs.sort()
for s in list_of_specs:
print(s)
wl, flux, MJD, dateobs, datereduc, fitsfiles = loadfits(s)
fig, ax = _plt.subplots()
ax.plot(wl, flux, label=dateobs)
wlrange = [_np.min(wl), _np.max(wl)]
flxrange = [_np.min(flux), _np.max(flux)]
band = 'unknown'
# print(wlrange)
for l in lines:
if _phc.is_inside_ranges(l, wlrange):
ax.plot([l, l], flxrange, '--', color='gray')
if wlrange[0] > l*0.91 and wlrange[1] < l*1.09:
band = lnames[lines.index(l)]
if band == 'unknown':
if wlrange[1] > 9000 and wlrange[1] < 25000:
band = 'nIR'
if wlrange[0] > 5300 and wlrange[1] < 11000:
band = 'RI'
if wlrange[0] < 4100 and wlrange[1] < 6000:
band = 'BV'
if wlrange[0] < 3700 and wlrange[1] < 6000:
band = 'UV'
if wlrange[0] < 4700 and wlrange[1] > 6700:
band = 'Vis'
ax.set_title(s)
ax.legend()
figname = _os.path.splitext(s)
_phc.savefig(fig, figname=list(figname)[0])
expname = '{}_{}_{}'.format(starid, instrument, band)
if len(comment) > 0:
expname += '_' + comment
expname += "_{}_{:04d}".format( int(MJD), int(round(1e4*(MJD % 1))) )
expname += ".{}.fits".format(calib)
_copyfile(s, expname)
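# Editor's note -- hypothetical call to classify_specs (file list, star id,
# instrument and calib label are invented for illustration):
# >>> specs = _glob('./raw/*.fits')
# >>> classify_specs(specs, 'HD6226', 'espadons', 'wc')
# For each FITS file this saves a quick-look figure, guesses the band from the
# wavelength coverage and copies the file to a standardized name such as
# 'HD6226_espadons_Ha_56789_1234.wc.fits'.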
def automatic_BeSS(RA, DEC, size='0.2', date_lower='1000-01-01',
                   date_upper="3000-01-01", band_lower='6.4e-7',
                   band_upper='6.7e-7'):
"""
    Download BeSS spectra directly from the database website, using VOTable
    parsing and pandas DataFrames.
Parameters
----------
RA : str
Right ascension [° J200] as string
DEC : str
Declination [° J2000] as string
size: str
Radius of the cone search in degree as string
date_lower: str
Initial date in format yyyy-mm-dd as string
date_upper: str
Final date in format yyyy-mm-dd as string
    band_lower: str
Initial wavelength [meters] in scientific notation as string
band_upper: str
Final wavelength [meters] in scientific notation as string
Returns
-------
None, the routine downloads file in the script directory.
Example
-------
#Halpha for 25 Cyg from 2019-10-01 to 2020-03-27
>>> RA = "299.979"
>>> DEC = "37.04"
>>> date_lower = "2019-10-01"
>>> date_upper = "2020-03-27"
    >>> automatic_BeSS(RA, DEC, size='0.1', date_lower=date_lower,
    ...                date_upper=date_upper, band_lower='6.4e-7',
    ...                band_upper='6.7e-7')
#Data downloaded in the script directory
-------
#Download all Ha data of a star
>>> automatic_BeSS(RA="299.979", DEC="37.04")
Routine written by Pedro Ticiani dos Santos
IMPORTANT NOTE: When using this function, the downloaded files go to the script
directory. This is something still undergoing work.
"""
user_url = 'http://basebe.obspm.fr/cgi-bin/ssapBE.pl?POS={0},{1}&SIZE={2}&BAND={3}/{4}&TIME={5}/{6}'.format(RA, DEC, size, band_lower, band_upper, date_lower, date_upper)
r = _requests.get(url = user_url)
# xml parsed => dict
global_dict = _xmltodict.parse(r.text)
# Interesting data selection
entries_list = global_dict['VOTABLE']['RESOURCE']['TABLE']['DATA']['TABLEDATA']['TR']
# Dataframe init (eq. Table)
df01 = _pd.DataFrame()
# Browse through the entries and record it in the dataframe df01
for item in entries_list:
# Create a row for the dataframe
p01 = {'Fits URL': item['TD'][0],
'Target name': item['TD'][45],
"Target class": item['TD'][46],
"vo_format": item['TD'][1]}
# add row in progress in the dataframe
df01 = df01.append(p01, ignore_index=True)
# Dataframe init
df02 = _pd.DataFrame()
# Iteration on each row
for item in entries_list:
vo_url_fits = item['TD'][0]
try:
# Download of each file in progress with his url
file_bess = _wget.download(vo_url_fits)
# Opening FITS
fits_in_progress = _pyfits.open(file_bess)
# Retrieve header information for fits in progress
header_fits_ip = fits_in_progress[1].header
# catch potential errors
except IOError:
print("Error downloading fits file.")
# Create a row for the dataframe
# with VO Table value + Header infos
p02 = {'Fits URL': item['TD'][0],
'Target name': item['TD'][45],
"Target class": item['TD'][46],
"Resolution" : header_fits_ip['SPEC_RES'],
"Creation Date" : header_fits_ip['DATE']}
# add row in progress in the dataframe
df02 = df02.append(p02, ignore_index=True)
# if you want to download only the first file, change : to 1.
download = _pyfits.open(_wget.download(df02.iloc[0][:]))
# THE FILES DOWNLOADED ARE IN VOTABLE FORMAT. Some scripts must be changed
# in the .fits reading part when extracting wavelength and flux values.
# MAIN ###
if __name__ == "__main__":
pass
|
danmoser/pyhdust
|
pyhdust/spectools.py
|
Python
|
gpl-3.0
| 130,184
|
[
"Gaussian"
] |
ed75e46ab82f21701cb49538289d991236f0c8d2c6263638cefe2cebf51186fb
|
import mdtraj as md
import tensorflow as tf
import tftraj.rmsd_op
import numpy as np
import math
def main():
rmsd_op = tftraj.rmsd_op.load()
traj = md.load(['../../fs_peptide/trajectory-{}.xtc'.format(i + 1) for i in range(28)],
top='../../fs_peptide/fs-peptide.pdb')
traj = traj[::10]
print("The trajectory has {} frames".format(len(traj)))
target = tf.Variable(tf.truncated_normal((1, traj.xyz.shape[1], 3), stddev=0.3), name='target')
msd, rot = rmsd_op.pairwise_msd(traj.xyz, target)
loss = tf.reduce_mean(msd, axis=0)
optimizer = tf.train.AdamOptimizer(1e-3)
train = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
conformations = []
print("{:>5s}{:>15s}{:>15s}".format("step", "loss", "rmsd (A)"))
for step in range(2500):
if step % 10 == 0:
_loss = sess.run(loss)[0]
_rmsd = math.sqrt(_loss) * 10
print("{:5d}{:15.5f}{:15.5f}".format(step, _loss, _rmsd))
conformations += [sess.run(target)]
sess.run(train)
conformations = np.array(conformations)
assert conformations.shape[1] == 1
conformations = conformations[:,0]
new_traj = md.Trajectory(xyz=conformations, topology=traj.topology)
new_traj.save('consensus.nc')
if __name__ == '__main__':
main()
|
mdtraj/tftraj
|
examples/rmsd-consensus/rmsd-consensus.py
|
Python
|
mit
| 1,368
|
[
"MDTraj"
] |
bad833d978384e6b4fa5e25edb3295f80e181b6278a5914ed84b1a9cc6c7028e
|
#!/usr/bin/python
import sys
def read_in_fasta_sequences(input_file,input_file_format):
"""Function that reads in a file of sequences
Using biopython SeqIO and returns a list of SeqIO seq record objects"""
import Bio.SeqIO
sequence_record_list=[]
for seq_record in Bio.SeqIO.parse(input_file,input_file_format):
sequence_record_list.append(seq_record)
return sequence_record_list
def consensus_sequence(seq_record_list):
"""Function that takes in sequence list of equal length (from read_in_fasta_sequences_function)
and builds a character matrix out of them to determine the consensus sequence (based on most common
nt at that posistion)"""
running_max_index=0
    acount_list, ccount_list, gcount_list, tcount_list = [], [], [], []
    raw_seq_list, character_Matrix = [], []
    index_consensus, consensus_string_list = [], []
for index in range(len(seq_record_list)):
raw_seq_list.append(seq_record_list[index].seq)
for sequence in raw_seq_list:
character_Matrix.append([character for character in sequence])
for j in range(len(character_Matrix[0])):
acount, tcount, gcount, ccount = 0,0,0,0
for i in range(len(character_Matrix)):
if character_Matrix[i][j]=="T":
tcount+=1
elif character_Matrix[i][j]=="A":
acount+=1
elif character_Matrix[i][j]=="C":
ccount+=1
elif character_Matrix[i][j]=="G":
gcount+=1
else:
print "Error building matrix"
acount_list.append(acount)
ccount_list.append(ccount)
gcount_list.append(gcount)
tcount_list.append(tcount)
profile=[acount_list,ccount_list,gcount_list,tcount_list]
for j in range(len(profile[0])):
running_max=profile[0][j]
for i in range(len(profile)):
if profile[i][j]>=running_max:
running_max=profile[i][j]
running_max_index=i
if i==(len(profile)-1):
index_consensus.append(running_max_index)
for i in range(len(index_consensus)):
if index_consensus[i]==0:
consensus_string_list.append("A")
elif index_consensus[i]==1:
consensus_string_list.append("C")
elif index_consensus[i]==2:
consensus_string_list.append("G")
elif index_consensus[i]==3:
consensus_string_list.append("T")
consensus_string=(''.join(consensus_string_list))
#print "A:",(' '.join(map(str,profile[0])))
#print "C:",(' '.join(map(str,profile[1])))
#print "G:",(' '.join(map(str,profile[2])))
#print "T:",(' '.join(map(str,profile[3])))
return(consensus_string)
def directed_adjacency_graph(sequences):
"""Build a directed adjacency graph from a list of seqrecord objects"""
sequence_dictionary={}
adjacency_graph_list=[]
for index in range(len(sequences)):
sequence_dictionary.update({sequences[index].id:str(sequences[index].seq)})
for keys in sequence_dictionary:
for keys2 in sequence_dictionary:
            overlap = min(len(sequence_dictionary[keys]), len(sequence_dictionary[keys2]))
while overlap>0:
if (sequence_dictionary[keys])[-overlap:] == (sequence_dictionary[keys2])[:overlap]:
break
overlap -= 1
if overlap > 1 and keys != keys2:
adjacency_tuple=(keys, keys2)
adjacency_graph_list.append(adjacency_tuple)
return adjacency_graph_list
def hamming_distance(sequences):
"""Calculates the hamming distance between sequences"""
import itertools
hamming_distance_list=[]
pairwise_combinations=list(itertools.combinations(sequences,2))
for index in range(len(pairwise_combinations)):
sequence_record_a=pairwise_combinations[index][0]
sequence_record_b=pairwise_combinations[index][1]
hamm=sum(ch1 != ch2 for ch1, ch2 in zip(sequence_record_a.seq, sequence_record_b.seq))
hamming_tuple=(sequence_record_a.id, sequence_record_b.id, hamm)
hamming_distance_list.append(hamming_tuple)
return hamming_distance_list
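# Editor's note -- small self-contained check of hamming_distance using
# Biopython SeqRecord objects (sequences are arbitrary):
# >>> from Bio.Seq import Seq
# >>> from Bio.SeqRecord import SeqRecord
# >>> recs = [SeqRecord(Seq("GAGCCTACT"), id="s1"),
# ...         SeqRecord(Seq("GAGCGTACT"), id="s2")]
# >>> hamming_distance(recs)
# [('s1', 's2', 1)]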
if __name__=="__main__":
sequences=read_in_fasta_sequences(sys.argv[1],"fasta")
print(consensus_sequence(sequences))
|
fmaguire/scripts
|
rosalind/rosalind_solutions.py
|
Python
|
mit
| 4,346
|
[
"Biopython"
] |
d74b79ed93d63fe2314781e3885b41b08035a8e8ff5355461ba7867a3e2fc038
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.editors.categoryeditor import SellableCategoryEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestSellableCategoryEditor(GUITest):
def test_create(self):
editor = SellableCategoryEditor(self.store)
self.check_editor(editor, 'editor-sellablecategory-create')
def test_description_validation(self):
# Just create an existing category to check unique value above
self.create_sellable_category(u'Existing category')
editor = SellableCategoryEditor(self.store)
self.assertInvalid(editor, ['description'])
editor.description.update('Non-existing category')
self.assertValid(editor, ['description'])
editor.description.update('Existing category')
self.assertInvalid(editor, ['description'])
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_sellablecategoryeditor.py
|
Python
|
gpl-2.0
| 1,701
|
[
"VisIt"
] |
a6877382be6629745f802c59f7e240b3bc64a8415f27a972cb89f357b2b48d36
|
# -*- coding: utf-8 -*-
#
desc = 'Noise with a color bar'
# phash = 'f54b0ba50bb10bf4'
phash = 'fd5a0bb503f50354'
def plot():
from numpy.random import randn
from matplotlib import pyplot as plt
import numpy as np
# Make plot with vertical (default) colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
np.random.seed(123)
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest')
ax.set_title('Gaussian noise with vertical colorbar')
# Add colorbar, make sure to specify tick locations
# to match desired ticklabels.
cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
# vertically oriented colorbar
cbar.ax.set_yticklabels(['< -1', '0', '> 1'])
return fig
|
dougnd/matplotlib2tikz
|
test/testfunctions/noise.py
|
Python
|
mit
| 748
|
[
"Gaussian"
] |
421d107b81e9e67b3889efa96f88cfd5d652ceb0394b4c0140fe416dfc50de39
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import time
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
import os
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.menu import StringOption, MediaOption, NumberOption
from gramps.gen.utils.file import media_path_full
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
#------------------------------------------------------------------------
#
# SimpleBookTitle
#
#------------------------------------------------------------------------
class SimpleBookTitle(Report):
""" This report class generates a title page for a book. """
def __init__(self, database, options, user):
"""
Create SimpleBookTitle object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
title - Title string.
subtitle - Subtitle string.
imgid - Gramps ID of the media object to use as an image.
imgsize - Size for the image.
footer - Footer string.
"""
Report.__init__(self, database, options, user)
self._user = user
menu = options.menu
self.title_string = menu.get_option_by_name('title').get_value()
self.image_size = menu.get_option_by_name('imgsize').get_value()
self.subtitle_string = menu.get_option_by_name('subtitle').get_value()
self.footer_string = menu.get_option_by_name('footer').get_value()
self.object_id = menu.get_option_by_name('imgid').get_value()
def write_report(self):
""" Generate the contents of the report """
self.doc.start_paragraph('SBT-Title')
self.doc.write_text(self.title_string)
self.doc.end_paragraph()
self.doc.start_paragraph('SBT-Subtitle')
self.doc.write_text(self.subtitle_string)
self.doc.end_paragraph()
if self.object_id:
the_object = self.database.get_object_from_gramps_id(self.object_id)
filename = media_path_full(self.database, the_object.get_path())
if os.path.exists(filename):
if self.image_size:
image_size = self.image_size
else:
image_size = min(
0.8 * self.doc.get_usable_width(),
0.7 * self.doc.get_usable_height() )
self.doc.add_media_object(filename, 'center',
image_size, image_size)
else:
self._user.warn(_('Could not add photo to page'),
_('File %s does not exist') % filename)
self.doc.start_paragraph('SBT-Footer')
self.doc.write_text(self.footer_string)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# SimpleBookTitleOptions
#
#------------------------------------------------------------------------
class SimpleBookTitleOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
""" Add the options for this report """
category_name = _("Report Options")
title = StringOption(_('book|Title'), _('Title of the Book') )
title.set_help(_("Title string for the book."))
menu.add_option(category_name, "title", title)
subtitle = StringOption(_('Subtitle'), _('Subtitle of the Book') )
subtitle.set_help(_("Subtitle string for the book."))
menu.add_option(category_name, "subtitle", subtitle)
dateinfo = time.localtime(time.time())
rname = self.__db.get_researcher().get_name()
footer_string = _('Copyright %(year)d %(name)s') % {
'year' : dateinfo[0], 'name' : rname }
footer = StringOption(_('Footer'), footer_string )
footer.set_help(_("Footer string for the page."))
menu.add_option(category_name, "footer", footer)
imgid = MediaOption(_('Image'))
imgid.set_help( _("Gramps ID of the media object to use as an image."))
menu.add_option(category_name, "imgid", imgid)
imgsize = NumberOption(_('Image Size'), 0, 0, 20, 0.1)
imgsize.set_help(_("Size of the image in cm. A value of 0 indicates "
"that the image should be fit to the page."))
menu.add_option(category_name, "imgsize", imgsize)
def make_default_style(self, default_style):
"""Make the default output style for the Simple Boot Title report."""
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=16, bold=1, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(1)
para.set_alignment(PARA_ALIGN_CENTER)
para.set(pad=0.5)
para.set_description(_('The style used for the title of the page.'))
default_style.add_paragraph_style("SBT-Title", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=14, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set(pad=0.5)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_('The style used for the subtitle.'))
default_style.add_paragraph_style("SBT-Subtitle", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=10, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set(pad=0.5)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_('The style used for the footer.'))
default_style.add_paragraph_style("SBT-Footer", para)
|
Forage/Gramps
|
gramps/plugins/textreport/simplebooktitle.py
|
Python
|
gpl-2.0
| 7,542
|
[
"Brian"
] |
3b94b7eddf27b1b7fb5cc720313989825d22c1c67a9f2833da7cf9a72a13fc6c
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import unittest
from stoqlib.domain.account import Account
from stoqlib.gui.editors.accounteditor import AccountEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestAccountEditor(GUITest):
def test_create(self):
editor = AccountEditor(self.store)
self.check_editor(editor, 'editor-account-create')
def test_confirm(self):
editor = AccountEditor(self.store)
self.assertFalse(editor.validate_confirm())
editor.description.update('Account name')
self.assertTrue(editor.validate_confirm())
editor.main_dialog.confirm()
self.check_editor(editor, 'editor-account-confirm',
[editor.retval])
def test_show(self):
account = self.create_account()
editor = AccountEditor(self.store, account)
# Created account must not be in accounts tree of editor.
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show')
def test_show_banco_do_brasil(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(1)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-banco-do-brasil')
def test_show_banrisul(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(41)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-banrisul')
def test_show_bradesco(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(237)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-bradesco')
def test_show_caixa(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(104)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-caixa')
def test_show_itau(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(341)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-itau')
def test_show_real(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(356)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-real')
def test_show_santander(self):
account = self.create_account()
account.account_type = Account.TYPE_BANK
editor = AccountEditor(self.store, account)
editor.bank_type.select_item_by_data(33)
self.assertFalse(editor.parent_accounts.get_account_by_id(account.id))
self.check_editor(editor, 'editor-account-show-santander')
if __name__ == '__main__':
from stoqlib.api import api
c = api.prepare_test()
unittest.main()
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_account_editor.py
|
Python
|
gpl-2.0
| 4,606
|
[
"VisIt"
] |
e3744b98ac0d63952acb871a586266e773209d239e09de5bb594f53fd9444d3c
|
"""
deep Boltzmann machines (greedy unsupervised pretraining + supervised training)
"""
import logging
from itertools import repeat
from .abcs import OpTrain
from tsdl.data import OpDataset
from tsdl.tools import Classification
from lazyflow.operator import InputSlot
from pylearn2.models import mlp
from pylearn2.models import rbm
from pylearn2.training_algorithms import sgd
from pylearn2.training_algorithms import bgd
from pylearn2.training_algorithms import learning_rule
from pylearn2.train_extensions import best_params
from pylearn2 import termination_criteria
from pylearn2 import blocks
from pylearn2 import train
from pylearn2.costs import ebm_estimation
from pylearn2.datasets import transformer_dataset
LOGGER = logging.getLogger(__name__)
class OpDeepTrain(OpTrain, Classification):
"""
create an MLP and train it with the DBN approach
(Classifier is compatible with OpMLPPredict)
"""
NumHiddenLayers = InputSlot()
SizeHiddenLayers = InputSlot()
_dbn = None
_layers = []
@classmethod
def build(cls, d, parent=None, graph=None, workingdir=None):
"""
configuration needs:
d["num_hidden_layers"] = <int>
d["size_hidden_layers"] = <int>
"""
operator = cls(parent=parent, graph=graph)
operator.NumHiddenLayers.setValue(d["num_hidden_layers"])
operator.SizeHiddenLayers.setValue(d["size_hidden_layers"])
return operator
def __init__(self, *args, **kwargs):
super(OpDeepTrain, self).__init__(*args, **kwargs)
self._train_data = OpDataset(parent=self)
self._train_data.Input.connect(self.Train)
self._valid_data = OpDataset(parent=self)
self._valid_data.Input.connect(self.Valid)
def setupOutputs(self):
super(OpDeepTrain, self).setupOutputs()
self._configure_layers()
def execute(self, slot, _, roi, result):
self._pretrain()
self._train_all()
result[0] = self._dbn
def setInSlot(self, slot, subindex, roi, value):
raise NotImplementedError()
def _configure_layers(self):
"""
create all the RBM layers according to configuration
"""
nvis = self.Train[0].meta.shape[1]
layers = []
layer_sizes = get_layer_size_iterator(self.SizeHiddenLayers.value)
for _ in range(self.NumHiddenLayers.value):
nhid = layer_sizes.next()
layer = rbm.RBM(nvis=nvis, nhid=nhid,
irange=4, init_bias_hid=4,
init_bias_vis=0,
monitor_reconstruction=True)
nvis = nhid
layers.append(layer)
n_out = self.Train[1].meta.shape[1]
output = mlp.Softmax(n_out, 'output', irange=.1)
layers.append(output)
self._layers = layers
def _pretrain(self):
"""
run greedy pretraining on all RBM layers
"""
cost = ebm_estimation.CDk(1)
# cost = ebm_estimation.SML(10, 5)
dataset = self._train_data
def get_transform(layers):
"""
closure for mapping the original dataset through pretrained layers
"""
transformed_dataset = transformer_dataset.TransformerDataset(
raw=dataset,
transformer=blocks.StackedBlocks(layers=layers))
return transformed_dataset
transformed_dataset = dataset
for i, layer in enumerate(self._layers):
if not self._is_pretrainable(layer):
continue
LOGGER.info("============ TRAINING UNSUPERVISED ============")
LOGGER.info("============ Layer %d ============", i)
channel = "train_reconstruction_error"
lra = sgd.MonitorBasedLRAdjuster(channel_name=channel)
keep = best_params.MonitorBasedSaveBest(
channel_name=channel, store_best_model=True)
ext = [lra, keep]
criteria = get_termination_criteria(epochs=200, channel=channel)
algorithm = sgd.SGD(learning_rate=.05, batch_size=50,
learning_rule=learning_rule.Momentum(
init_momentum=.5),
termination_criterion=criteria,
monitoring_dataset={'train':
transformed_dataset},
monitor_iteration_mode="sequential",
monitoring_batch_size=1000,
cost=cost,
seed=None,
train_iteration_mode='sequential')
trainer = train.Train(dataset=transformed_dataset, model=layer,
algorithm=algorithm,
extensions=ext)
trainer.main_loop()
# set best parameters to layer
layer.set_param_values(keep.best_model.get_param_values())
LOGGER.info("Restoring model with cost %f", keep.best_cost)
# redefinition of transformed_dataset is ok
# pylint: disable=R0204
transformed_dataset = get_transform(self._layers[:i+1])
# pylint: enable=R0204
def _train_all(self):
"""
supervised training (after unsupervised pretraining)
"""
LOGGER.info("============ TRAINING SUPERVISED ============")
nvis = self.Train[0].meta.shape[1]
tds = self._train_data
vds = self._valid_data
channel = "valid_output_misclass"
criteria = get_termination_criteria(epochs=200, channel=channel)
algorithm = bgd.BGD(line_search_mode='exhaustive',
batch_size=1000,
monitoring_dataset={'valid': vds},
monitoring_batch_size=1000,
termination_criterion=criteria,
seed=None)
layers = self._layers
def layer_mapping(index, layer):
"""
tell MLP that RBM layers are pretrained
"""
if not self._is_pretrainable(layer):
return layer
name = "{}_{:02d}".format(layer.__class__.__name__, index)
return mlp.PretrainedLayer(layer_name=name,
layer_content=layer)
layers = [layer_mapping(index, layer)
for index, layer in enumerate(layers)]
dbn = mlp.MLP(layers, nvis=nvis)
trainer = train.Train(dataset=tds, model=dbn,
algorithm=algorithm,
extensions=[])
trainer.main_loop()
self._dbn = dbn
@staticmethod
def _is_pretrainable(layer):
"""
can this layer be pretrained?
"""
return isinstance(layer, rbm.RBM)
def get_layer_size_iterator(int_or_iterable):
"""
get an iterator for different inputs:
iterable -> iterate over iterable
other type -> repeat this type ad infinitum
"""
try:
iter_ = iter(int_or_iterable)
except TypeError:
# layer size is a single integer
iter_ = repeat(int_or_iterable)
while True:
yield iter_.next()
def get_termination_criteria(epochs=None, channel=None):
"""
construct AND'ed termination criteria from
* max number of training epochs
* non-decrease in some monitored channel
"""
criteria = []
if epochs is not None:
criteria.append(termination_criteria.EpochCounter(epochs))
if channel is not None:
criteria.append(termination_criteria.MonitorBased(
channel_name=channel, prop_decrease=.00, N=20))
return termination_criteria.And(criteria)
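# Editor's note -- behaviour sketch for the two module-level helpers above
# (purely illustrative; only the second one touches pylearn2 objects):
# >>> it = get_layer_size_iterator([100, 50])
# >>> it.next(), it.next()   # -> (100, 50); a plain int would repeat forever
# >>> criteria = get_termination_criteria(epochs=200,
# ...                                     channel="valid_output_misclass")
# `criteria` is a termination_criteria.And() combining an epoch cap with a
# no-improvement check on the monitored channel.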
|
burgerdev/hostload
|
tsdl/classifiers/deep.py
|
Python
|
mit
| 7,930
|
[
"CDK"
] |
28a64ed09bf9ff162aabb9f760974eea4f2dd71131eb7687ce93d6dc71cc5c39
|
class mutation_stability(object):
'''
check mutation and format it so that it's compatible with foldx structure 1HA0 and 2YP7
'''
def __init__(self, mut, structure):
self.mut = mut # list of mutations
self.mut_chain_info =[]
self.structure = structure # either 1HA0 or 2YP7
if self.structure not in ["1HA0", "2YP7", "2YP2"]:
raise ValueError("This program only works for pdb structures 1HA0, 2YP7, 2YP2")
def __str__(self):
return ", ".join(self.mut_chain_info_set)
def site_range_valid(self, mutation):
'''
protein structures (1HA0, 2YP7) are missing certain amino acid sites, method checks that mutation is in structure
:param mutation: mutation in standard format
:return: true if site is in structure, false if site range is not in structure
'''
lowerRange = 9
upperRange = 502
missing_lower = 328
missing_upper = 333
site = int(mutation[1:len(mutation) - 1])
if missing_lower <= site <= missing_upper: # in missing middle section
return False
elif lowerRange <= site <= upperRange: # in range of protein structure besides middle section
return True
else:
return False
def include_chain_info(self, mutation):
'''
includes chain information for each mutation passed to function. HA is a trimer so need to specify chain for
foldx
:param mutation: mutation in standard format
'''
if self.structure == "1HA0":
chains = ["A", "M", "Y"]
elif self.structure == "2YP7":
chains = ["A", "P", "E"]
elif self.structure == "2YP2":
chains = ["A", "R", "I"]
site = mutation[1:len(mutation) - 1]
aa1 = mutation[0]
aa2 = mutation[len(mutation)-1]
return [aa1+chain+site+aa2 for chain in chains]
def get_formatted_mutations(self):
'''
        checks each mutation in self.mut that it is valid for the structures 1HA0, 2YP7 and 2YP2. Calls
        include_chain_info, which adds each mutation with chain info to self.mut_chain_info.
'''
self.valid_mut_list = filter(lambda mut: self.site_range_valid(mut), self.mut)
for mutation in self.valid_mut_list:
self.mut_chain_info.extend(self.include_chain_info(mutation))
return ';'.join(self.mut_chain_info)
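# Editor's note -- hypothetical usage of mutation_stability (mutations are
# invented; format is <aa_from><HA site><aa_to>):
# >>> ms = mutation_stability(['K160T', 'N145S'], '2YP7')
# >>> ms.get_formatted_mutations()
# 'KA160T;KP160T;KE160T;NA145S;NP145S;NE145S'
# Each mutation is expanded to the three chains of the trimer (A, P, E for
# 2YP7) and joined with ';' for FoldX input.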
|
blab/stability
|
augur/stability-data/foldx_essentials/mutation_stability.py
|
Python
|
agpl-3.0
| 2,451
|
[
"FoldX"
] |
c5b8c30a6a1c3680c4b2eb5abdfc49cdad6b3f2f6ff6048be488585a6393f6a3
|
#!/usr/bin/env python
# Todo Copy V1.4 by Dan Rahmel
# (c) Copyright 2007-2017 by Dan Rahmel
# Created December 21, 2007
# Todo Copy can be controlled either by direct commands or through an XML-based batch file.
# If no parameters are given when it is executed, it will look for the batch file tc_autorun.xml
# and execute the script items inside there.
# Sample usage:
# python todocopy.py
# python todocopy.py example_exec.xml
# python todocopy.py example_exec.xml target1
# python todocopy.py copy ./ ../yhw_production # Copy files from -> to
# python todocopy.py copylist filelist.txt ../yhw_production # Copy all files in list with automatic path creation
# python todocopy.py -l filelist.txt --ftp 127.127.10.199 # FTP
# python todocopy.py dbsummary -o dbsumm.xml # Generate database summary and output to .xml file
# python todocopy.py dbsummary tbvenue,tbtasktype -o db.xml # Generate database summary of 2 tables and output to .xml file
import os, sys, zlib, zipfile, time, dircache, re, datetime, calendar
from optparse import OptionParser
from time import strftime
import xml.dom.minidom
imports = {}
# Import optional libraries
try:
import ftplib
imports['ftp']=True
except:
pass
try:
import shutil
imports['shutil']=True
except:
pass
try:
import MySQLdb
imports['mysql']=True
except:
pass
try:
import smtplib
import email, email.MIMEBase, email.MIMEText, email.MIMEMultipart
imports['smtp']=True
except:
pass
try:
import md5
imports['md5']=True
except:
pass
try:
import sha
imports['sha1']=True
except:
pass
class todocopy:
"Todo Copy will copy, zip,and FTP file backups. It can be commanded directly or through a batch file."
archivePath =""
archiveName = ""
curArchiveName = ""
curZip = None
curZipInc = 0
ignoreSVN = True
exclusionList = ("jpg","ppt","gif","mov","avi","mp3","swf")
archiveList = []
examples = []
cmdOptions = None
cmdArgs = None
curSpaces = " "
testMode = False
reportLevel = 2
reportSampleInc = 0
reportSampleFreq = 8
props={'ignoresvndir':1,'noarchive':0,'srcPath':'','destPath':'','archiveFile':'',
'reportLevel':2,'reportSampleInc':0,'reportSampleFreq':8,'quietmode':0,
'db_host':'localhost','db_username':'root','db_password':'', 'db_name':'mysql','db_port':3306,
'profile_begin_id':0
}
tags={}
commandList = {}
fileLists = {}
targetList = {}
basePath = ""
reportInc = 0
mysqlconn = None
def __init__(self):
self.data = []
self.processors = []
self.returnStr = ""
self.ignoreSVN = True
self.tags['DATE'] = strftime("%m%d%y")
self.tags['TIME'] = strftime("%H%M")
curMonth = 5
curYear = 2009
#self.tags['FIRSTDAY'] = self.firstDay(curMonth,curYear)
#self.tags['LASTDAY'] = self.lastDay(curMonth,curYear)
self.tags['COUNTER'] = 0
if self.curOS()=='linux':
self.tags['COLOR_RED'] = '\033[0;31m'
self.tags['COLOR_BLUE'] = '\033[0;34m'
self.tags['COLOR_GREEN'] = '\033[0;32m'
self.tags['COLOR_YELLOW'] = '\033[0;33m'
self.tags['COLOR_END'] = '\033[m'
else:
self.tags['COLOR_RED'] = ''
self.tags['COLOR_BLUE'] = ''
self.tags['COLOR_GREEN'] = ''
self.tags['COLOR_YELLOW'] = ''
self.tags['COLOR_END'] = ''
# Get the current revision for output header
def getRev(self):
revStr= "$Rev: 62 $"
tempArray = revStr.split()
return "1.3."+str(tempArray[1])
# Replace {tag} items in specified string
def replaceTags(self,outStr):
for key, value in self.tags.iteritems():
outStr = outStr.replace("{" + key + "}", str(value));
return outStr
# Test the passed file to see if the three letter file extension is in the exclusion list
def testExtension(self,inFile):
return inFile[-3:].lower() in self.exclusionList
# For status output, all messages are passed here and then based on user preferences,
# different levels of output are used.
def report(self,inStr,inInc=0):
self.reportInc += inInc
# 0 = Report all
if self.reportLevel<1:
print inStr
# 1 = Report most
elif self.reportLevel<2:
print inStr
# 2 = Report sample
elif self.reportLevel<3:
if self.reportSampleInc % int(self.props['reportSampleFreq']) == 0:
print "\n#"+str(self.reportInc)+":sample: " + inStr
else:
print ".",
self.reportSampleInc += 1
# 3 = Report status marker
elif self.reportLevel<4:
print ".",
# 4 = Report most
elif self.reportLevel<5:
print inStr
def relativePath(self,basePath,srcPath):
outPath = srcPath
bpLen = len(self.basePath)
if bpLen>0:
# If paths are the same, remove base path
if os.path.abspath(srcPath[0:bpLen])==os.path.abspath(basePath):
# Shave off the preceding slash
if srcPath[bpLen:bpLen+1] == '/' or srcPath[bpLen:bpLen+1] == '\\':
bpLen += 1
return srcPath[bpLen:]
return srcPath
def copyFile(self, inPath,destPath):
success = False
(srcPath,fileName) = os.path.split(inPath)
outPath = os.path.join(destPath,srcPath)
bpLen = len(self.basePath)
if bpLen>0:
# If paths are the same, remove base path
if os.path.abspath(srcPath[0:bpLen])==os.path.abspath(self.basePath):
outPath = os.path.join(destPath,srcPath[bpLen:])
src = os.path.join(srcPath,fileName)
dest = os.path.join(destPath,srcPath,fileName)
self.report("Copying file:" + src+ " to: "+dest)
try:
if not os.path.isdir(outPath):
os.makedirs(outPath)
shutil.copyfile(src,os.path.join(outPath,fileName))
success = True
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (src, dest, str(why))
return success
# Create XML-based manifest of files to backup with CRC32 checksums
def createManifest(self,compareManifestName=None):
# If a manifest name was passed, use it for comparison
if compareManifestName:
pass
def ftpSendFile(self,inFtpRef,inFileName):
success = False
myFile = open(inFileName,'rb')
(fName,fExt) = os.path.splitext(inFileName)
try:
inFtpRef.storbinary("STOR " + inFileName,myFile)
success = True
except Exception:
print "Upload failed!"
myFile.close()
return success
def ftpConnect(self,inURL,inUsername,inPassword):
# "anonymous" "Anonymous" "anonymous" "Anonymous":
ftph = ftplib.FTP(inURL)
ftph.login(inUsername,inPassword)
print ftph.getwelcome()
ftph.cwd("bu")
curFtpPath = ftph.pwd()
print "Current path:"+curFtpPath
return ftph
# ftph.mkd(mkdirname)
#upload(ftph,lf)
#ftph.close()
#ftph = FTP(host_name)
#ftph.login(user,pwd)
#ftph.cwd(rmdirname)
#allfiles = ftph.nlst()
# print ftph.dir()
# Get the manifest on the remote directory
def ftpGetManifest(self):
pass
def ftpArchiveList(self,inList,ftpURL,username,password):
connectPtr = self.ftpConnect(ftpURL,username,password)
#for arc in inList:
(head, tail) = os.path.split(inList)
print "Sending "+tail+"...\n"
# TODO: The FTP chokes on full paths, so the tail is being used. This is a prob for the crontab though
self.ftpSendFile(connectPtr,tail)
print "Completed ftp."
def doExec(self,cmdStr,asArray=True,standardError=True):
cmdStr = self.replaceTags(cmdStr)
rows = []
outStr = ''
if standardError:
cmdStr += " 2>&1"
myProcess = os.popen(cmdStr)
for row in myProcess.readlines(): # run find command
if asArray:
rows.append(row)
else:
outStr += row
exitStatus = myProcess.close()
# Check if error occurred
if exitStatus:
outStr = "Error:"+outStr
rows.insert(0,"\n*** Error: ***\n")
else:
pass
if asArray:
return rows
else:
return outStr
def attr(self,attrList,attrName,default=''):
if attrList.has_key(attrName):
return attrList[attrName]
else:
return default
def addExample(self,cmdName,example,desc):
self.examples.append({'name':cmdName,'example':example,'desc':desc})
def curOS(self):
if sys.platform == "darwin":
return 'mac'
elif sys.platform == "win32":
return 'win'
elif sys.platform.find("linux") != -1:
return 'linux'
else:
return 'linux'
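# Read a newline-delimited list of file paths; returns None if the list file can't be read.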
def getFileList(self,listPath):
try:
f=open(listPath, 'r')
fileList = f.read()
f.close()
except (IOError, os.error), why:
print "Can't read filelist(%s): %s" % (inArgs[0], str(why))
return None
return fileList.split("\n")
def firstDay(self,inMonth,inYear):
tempDay=datetime.date(inYear,inMonth,1)
return tempDay.strftime("%Y-%m-%d")
def lastDay(self,inMonth,inYear):
monthInfo=calendar.monthrange(inYear,inMonth)
tempDay=datetime.date(inYear,inMonth,monthInfo[1])
return tempDay.strftime("%Y-%m-%d")
# ------------- TASKS -------------
# These tasks can be included in a batch file and perform specific operations
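# Look for fileName in basePath (or the cwd) and then in up to 10 parent directories;
# returns the directory that contains it, or '' if it isn't found.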
def taskFindAbove(self,inAttr):
basePath = inAttr.get('src','')
fileName= inAttr.get('filename','')
if len(basePath)==0:
basePath = os.getcwd()
curPath = os.path.abspath(basePath)
for i in range(10):
testPath = os.path.join(curPath,fileName)
if os.path.isfile(testPath):
print "Found at:"+testPath
return curPath
else:
curPath = os.path.abspath(os.path.join(curPath,'../'))
if(not os.path.isdir(curPath)):
break
print "Can't find file:"+fileName
return ''
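# 'getconfig' locates a Joomla configuration.php (searching upward if no src is given),
# parses its var assignments and copies host/db/user/password into self.props so later
# database tasks can reuse the site's credentials.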
def taskJoomla(self,inAttr):
action = inAttr.get('action','')
if action == 'getconfig':
configPath = inAttr.get('src','')
if len(configPath)==0:
# search for config in current directory and then 10 relative paths up
configPath = self.taskFindAbove({'src':'./','filename':'configuration.php'})
if len(configPath)>0:
configPath = os.path.join(configPath,'configuration.php')
if len(configPath)>0:
print configPath
f = open(configPath,'r')
configText = f.read()
f.close()
configText = configText.replace('var ','')
configArray = configText.split("\n")
joomlaVars = {}
for entry in configArray:
tempVar = entry.split('=')
if(len(tempVar)==2):
keyName = tempVar[0].replace('$','').strip()
tempValue = tempVar[1].replace('"','').replace("'",'').replace(";",'').strip()
joomlaVars[keyName] = tempValue
#print joomlaVars
if 'host' in joomlaVars:
self.props['db_host'] = joomlaVars['host']
if 'db' in joomlaVars:
self.props['db_name'] = joomlaVars['db']
if 'user' in joomlaVars:
self.props['db_username'] = joomlaVars['user']
if 'password' in joomlaVars:
self.props['db_password'] = joomlaVars['password']
if len(joomlaVars)>0:
print "Found and processed Joomla configuration file."
else:
print "The Joomla configuration.php file is not found."
return
def taskExamples(self,inArgs):
print "\n--------- Todo Copy Command Line Examples --------- \n"
print 'CmdName'.ljust(15)+'Example'.ljust(55)+'Description'
print '-------'.ljust(15)+'-------'.ljust(55)+'-----------'
for example in self.examples:
print example['name'].ljust(15)+example['example'].ljust(55)+example['desc']
def taskSHA1(self,inAttr):
if 'sha1' in imports:
plaintext = inAttr.get('source','')
encoderSHA1 = sha.new()
encoderSHA1.update(plaintext)
print plaintext+' = '+encoderSHA1.hexdigest()
else:
print "Missing SHA1 encoding library"
def taskMD5(self,inAttr):
if 'md5' in imports:
plaintext = inAttr.get('source','')
encoderMD5 = md5.new()
encoderMD5.update(plaintext)
print plaintext+' = '+encoderMD5.hexdigest()
else:
print "Missing MD5 encoding library"
def taskSetPath(self,inPath):
myOS = self.curOS()
if myOS == 'win':
pathCmd = "set PATH=%PATH%;"+os.path.join(os.getcwd())
#print pathCmd
print sys.path
sys.path.append(os.getcwd())
print sys.path
#os.system(pathCmd)
#print self.doExec(pathCmd,False)
elif myOS == 'linux':
pass
elif myOS == 'mac':
pass
def taskCreateList(self,inArgs):
srcDir = inArgs['src']
destDir = inArgs['dest']
self.basePath = srcDir
recurse = self.props.get('recursive',0)
dirList = []
fileList = []
i = 0
if recurse:
for curPath,dirs,files in os.walk(srcDir):
for inName in files:
fileList.append(self.relativePath(srcDir,os.path.join(curPath,inName)))
i += 1
if self.ignoreSVN:
if '.svn' in dirs:
dirs.remove('.svn') # ignore the SVN metadata directories
for inName in dirs:
tempDirPath = self.relativePath(srcDir,os.path.join(curPath,inName))
tempDirPath += os.sep
dirList.append(tempDirPath)
#walkLog.close()
else:
fileList = os.listdir(srcDir)
# Copy the return value so we can change 'fileList'
fileList = fileList[:]
# Remove SVN
if fileList[0]=='.svn' and self.ignoreSVN:
del fileList[0]
# Add trailing '/' if item is a directory
dircache.annotate(srcDir, fileList)
if len(destDir)> 0:
if(destDir[:1]=='['):
print "Setting property:"+destDir[1:-1]
else:
outputType = inArgs.get('type','newline')
if outputType == 'newline':
f = open(destDir,'w')
f.write("\n".join(dirList))
f.write("\n".join(fileList))
f.close()
elif outputType =='comma':
import csv
writer = csv.writer(open(destDir, "wb"))
writer.writerows([dirList,fileList])
print "File list output complete to:"+destDir
else:
print "\n".join(dirList)
print "\n".join(fileList)
def taskCopy(self,inArgs):
#def copyFiles(self,srcDir,destDir,
recurse=True
inLog=False
srcDir = inArgs['src']
destDir = inArgs['dest']
#print srcDir, destDir
self.returnStr = ""
self.ignoreSVN = True
self.basePath = srcDir
i = 0
#walkLog=open(walkDir+'codescan_log.txt', 'w')
if recurse:
for curPath,dirs,files in os.walk(srcDir):
for inName in files:
#if inName[-3:] == "php":
# processFile(inName,curPath,walkLog)
#print "Copy file:"+curPath+"/"+inName+" to "+destDir+curPath+"/"+inName
self.copyFile(os.path.join(curPath,inName),destDir)
i += 1
if self.ignoreSVN:
if '.svn' in dirs:
dirs.remove('.svn') # ignore the SVN metadata directories
#walkLog.close()
if i>1:
print "\nCopied "+str(i)+" files"
return True
def taskCopyList(self,inArgs):
fileList = self.getFileList(inArgs['filelist'])
if fileList:
i = 0
for curFile in fileList:
if(len(curFile)>0):
if self.copyFile(curFile, inArgs['dest']):
i += 1
self.report("Copied "+str(i)+" files.")
def taskFTPList(self,inArgs):
fileList = self.getFileList(inArgs['filelist'])
if fileList:
# TODO: Add FTP connection
i = 0
for curFile in fileList:
if(len(curFile)>0):
(head, tail) = os.path.split(curFile)
self.report("Sending "+tail+"...\n")
# TODO: The FTP chokes on full paths, so the tail is being used. This is a prob for the crontab though
if self.ftpSendFile(connectPtr,tail):
i += 1
self.report("FTPed "+str(i)+" files.")
def taskExec(self,inAttr):
if inAttr:
cmdStr = ''
if inAttr.has_key('value'):
cmdStr = inAttr['value'].value
if inAttr.has_key('executable'):
cmdStr = inAttr['executable'].value
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
def taskSVNDirList(self,inAttr):
if self.cmdOptions.recursive:
recurse = 1
else:
recurse = inAttr.get('recurse', 0)
recurseStr = ''
if recurse:
recurseStr = ' -R '
revStr = inAttr.get('revstr', '')
if self.cmdOptions.source:
srcDir = self.cmdOptions.source
else:
srcDir = inAttr.get('src', ' . ')
svnDirs = []
svnFiles = []
cmdStr = "svn info "+recurseStr+' '+srcDir+" --xml"
print cmdStr
xmlStr = self.doExec(cmdStr,False)
if xmlStr[:6] == "Error:":
endError = xmlStr.find("\n")
print xmlStr[:endError]
return
svnDOM = xml.dom.minidom.parseString(xmlStr)
targets = svnDOM.getElementsByTagName("info").item(0).childNodes
# Find all the files and directories in this working copy tree
print "Beginning svn folder recurse..."
for target in targets:
elementName = target.localName
if elementName == 'entry':
entryKind = target.getAttribute('kind')
if entryKind == 'dir':
urlNode = target.getElementsByTagName("url")
svnDirs.append(urlNode.item(0).childNodes.item(0).nodeValue)
print ".",
elif entryKind == 'file':
urlNode = target.getElementsByTagName("url")
svnFiles.append(urlNode.item(0).childNodes.item(0).nodeValue)
print "Total svn dirs:"+str(len(svnDirs))
return svnDirs
def taskSVNFindReplaceProp(self,inAttr):
myDirs = self.taskSVNDirList(inAttr)
# Look through directories
for myDir in myDirs:
print myDir
propGetStr = "svn propget "+myDir
# returnArray = self.doExec(propGetStr)
def taskSVN(self,inAttr):
# TODO: Process return info as XML and report specific items.
action = inAttr.get('action', '')
if action=='status':
cmdStr = "svn status -u --xml"
xmlStr = self.doExec(cmdStr,False)
svnDOM = xml.dom.minidom.parseString(xmlStr)
entries = svnDOM.getElementsByTagName("target").item(0).childNodes
modFiles = []
for entry in entries:
elementName = entry.localName
if elementName == 'entry':
status = ''
repoElement = entry.getElementsByTagName("repos-status")
if repoElement:
status = repoElement.item(0).getAttribute('item')
if status == 'modified':
modFiles.append(entry.getAttribute('path'))
if len(modFiles)>0:
print "\n____ Modified Files in Repository ____"
for fileName in modFiles:
print fileName
#print xmlStr # ''.join(returnArray)
# Sync will revert all modified files, remove non-versioned files and (empty) dirs, and do an SVN UP
# This is perfect for moving files from a dev checkout to a production checkout
elif action=='sync':
cmdStr = "svn revert . -R"
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
cmdStr = "svn status --xml"
returnArray = self.doExec(cmdStr)
#print ''.join(returnArray)
xmlStr = ''.join(returnArray)
svnDOM = xml.dom.minidom.parseString(xmlStr)
targets = svnDOM.getElementsByTagName("target").item(0).childNodes
for target in targets:
elementName = target.localName
if elementName == 'entry':
status = target.getElementsByTagName("wc-status").item(0).getAttribute('item')
if status == 'unversioned':
delPath = target.getAttribute('path')
print "Deleting:" + delPath
if os.path.isdir(delPath):
try:
os.rmdir(delPath)
except (IOError, os.error), why:
print "Can't delete %s: %s" % (delPath,str(why))
else:
os.remove(delPath)
cmdStr = "svn up"
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
elif action=='property':
filePath = inAttr.get('path', '')
propName = inAttr.get('name', '')
# If there is a value, then it's a set, otherwise a get
if 'value' in inAttr:
propVal = inAttr.get('value', '')
# If property has newline characters, SVN won't honor them from the command line,
# so they need to be written to a temp file which does work
if propVal.find("\\n") != -1:
propVal = propVal.replace("\\n","\n")
tempFilename = 'tempSVNPropSet.txt'
# Write property value to the temp file
f = open(tempFilename,'w')
f.write(propVal)
f.close()
cmdStr = "svn propset "+propName+" -F "+tempFilename+" "+filePath
returnArray = self.doExec(cmdStr)
os.remove(tempFilename)
else:
cmdStr = "svn propset "+propName+' "'+propVal+'" '+filePath
print cmdStr
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
else:
print "Get "+propName
elif action=='dir':
self.taskSVNDirList(inAttr)
elif action=='findreplaceprop':
self.taskSVNFindReplaceProp(inAttr)
elif action=='add':
filePath = inAttr.get('path', '')
if filePath:
cmdStr = "svn add "+filePath
print cmdStr
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
elif action=='log':
recurse = inAttr.get('recurse', 0)
recurseStr = ''
if recurse:
recurseStr = ' -R '
revStr = inAttr.get('revstr', '')
srcDir = inAttr.get('src', ' . ')
svnDirs = []
svnFiles = []
cmdStr = "svn info "+recurseStr+' '+srcDir+" --xml"
xmlStr = self.doExec(cmdStr,False)
svnDOM = xml.dom.minidom.parseString(xmlStr)
targets = svnDOM.getElementsByTagName("info").item(0).childNodes
# Find all the files and directories in this working copy tree
print "Beginning svn folder recurse..."
for target in targets:
elementName = target.localName
if elementName == 'entry':
entryKind = target.getAttribute('kind')
if entryKind == 'dir':
urlNode = target.getElementsByTagName("url")
svnDirs.append(urlNode.item(0).childNodes.item(0).nodeValue)
print ".",
elif entryKind == 'file':
urlNode = target.getElementsByTagName("url")
svnFiles.append(urlNode.item(0).childNodes.item(0).nodeValue)
print "Total svn dirs:"+str(len(svnDirs))
# Output the logs for all the directories
logArray = []
for curDir in svnDirs:
# print curDir,len(curDir)
cmdStr = "svn log "+revStr+" " +curDir+ " --xml --verbose "
xmlStr = self.doExec(cmdStr,False)
if xmlStr[0:len("Error:")] != "Error:":
logArray.append(xmlStr)
print "Logs:"+str(len(logArray))
revMessages = {}
totalPaths = {}
# Parse the logs to find the messages
for xmlStr in logArray:
svnDOM = xml.dom.minidom.parseString(xmlStr)
targets = svnDOM.getElementsByTagName("log").item(0).childNodes
for target in targets:
elementName = target.localName
if elementName == 'logentry':
entryRev = target.getAttribute('revision')
logDate = target.getElementsByTagName("date").item(0).childNodes.item(0).nodeValue
msg = target.getElementsByTagName("msg").item(0).childNodes.item(0).nodeValue
# Only add if revision has not already been logged
if entryRev not in revMessages:
logPathArray = []
# Get the list of path elements
pathTargets = target.getElementsByTagName("paths").item(0).childNodes
for pathItem in pathTargets:
elementName = pathItem.localName
if elementName == 'path':
logPathArray.append(pathItem.childNodes.item(0).nodeValue)
revMessages[entryRev] = {'date':logDate,'msg':msg,'paths':logPathArray}
for mypath in logPathArray:
if mypath not in totalPaths:
totalPaths[mypath] = 1
#print revMessages
print "___________ Logs ___________"
revKeys = revMessages.keys()
revKeys.sort()
tb = "\t"
for revEntry in revKeys:
print 'Rev:'+revEntry+tb+revMessages[revEntry]['date']+tb+revMessages[revEntry]['msg']+tb+'Files:'+','.join(revMessages[revEntry]['paths'])
print "___________ Files Changed ___________"
pathList = totalPaths.keys()
pathList.sort()
for mypath in pathList:
print mypath
# Output a crontab string from human-readable batch input
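# e.g. (illustrative) <crontab cmd="/usr/bin/backup.sh" hour="2" dayofweek="sun" log="/tmp/bu.log"/>
# prints: crontab string: * 2 * * 0 /usr/bin/backup.sh  > /tmp/bu.log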
def taskCrontab(self,inAttr):
crCmd = inAttr.get('cmd', '/etc/myprog')
crOutfile = inAttr.get('log', '')
if len(crOutfile)>0:
crOutfile = " > "+crOutfile
crMonth = inAttr.get('month', '*')
crDay = inAttr.get('day', '*')
crHour = inAttr.get('hour', '*')
crMin = inAttr.get('min', '*')
dow = inAttr.get('dayofweek','').lower()
dowNum = '*'
if(dow=='sunday' or dow=='sun'):
dowNum='0'
elif(dow=='monday' or dow=='mon'):
dowNum='1'
elif(dow=='tuesday' or dow=='tue'):
dowNum='2'
elif(dow=='wednesday' or dow=='wed'):
dowNum='3'
elif(dow=='thursday' or dow=='thu'):
dowNum='4'
elif(dow=='friday' or dow=='fri'):
dowNum='5'
elif(dow=='saturday' or dow=='sat'):
dowNum='6'
print "crontab string: "+crMin+" "+crHour+" "+crDay+" "+crMonth+" "+dowNum+" "+crCmd+" "+crOutfile
def taskEmail(self,inAttr):
print "Sending email."
# TODO: Check OS type property
emailMethod = inAttr.get('method', 'smtp')
emailFrom = inAttr.get('from', 'todocopy@example.com')
emailTo = inAttr.get('to', 'danr@example.com')
emailSubject = inAttr.get('subject', 'Todo Copy test {DATE}')
emailSubject = self.replaceTags(emailSubject)
emailBody = inAttr.get('body', "The SC Body\nSome more text.\n")
emailBody = self.replaceTags(emailBody)
emailAttachments = inAttr.get('attach', "")
if emailMethod=='sendmail':
cmdSendMail = "/usr/sbin/sendmail"
p = os.popen("%s -t -v " % cmdSendMail, "w")
p.write("To: "+emailTo+"\n")
p.write("From: "+emailFrom+"\n")
p.write("Subject: "+emailSubject+"\n")
p.write("\n")
p.write(emailBody)
sts = p.close()
if sts != 0:
print "Sendmail exit status", sts
else:
server = smtplib.SMTP('localhost')
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = emailFrom
msg['To'] = emailTo
msg['Date'] = email.Utils.formatdate(localtime=True)
msg['Subject'] = emailSubject
msg.attach(email.MIMEText.MIMEText(emailBody))
attachments = emailAttachments.split(",")
for attachment in attachments:
attach = attachment.strip()
if len(attach)>0:
part = email.MIMEBase.MIMEBase('application', "octet-stream")
part.set_payload(open(attach,"rb").read())
email.Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attach))
msg.attach(part)
server.sendmail(emailFrom, emailTo.split(","), msg.as_string())
server.quit()
# Change the current working directory used by subsequent tasks
def taskWorkingDir(self,inAttr):
chPath = os.path.abspath(inAttr.get('value', ''))
try:
os.chdir(chPath)
self.report("Changed to directory:"+chPath)
except (IOError, os.error), why:
print "Can't change to directory %s: %s" % (delPath,str(why))
# Make a directory with sub-directories if necessary
def taskMkDir(self,inAttr):
try:
destPath = os.path.abspath(inAttr.get('value', ''))
os.makedirs(destPath)
except (IOError, os.error), why:
print "Error creating directory: %s -- %s" % ( destPath, str(why))
def taskDBSummary(self,inAttr):
if 'mysql' in imports:
outStr = ""
try:
conn = MySQLdb.connect (host = self.props['db_host'], user = self.props['db_username'],passwd = self.props['db_password'], db = self.props['db_name'])
cursor = conn.cursor()
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return
dbname = self.props['db_name']
try:
if 'tablelist' not in inAttr:
inAttr['tablelist'] = ''
if 'excludelist' not in inAttr:
inAttr['excludelist'] = {}
else:
inAttr['excludelist'] = inAttr['excludelist'].split(",")
pauseBetween = inAttr.get('pausebetween',0);
checksum = inAttr.get('checksum',0);
onlymissing = inAttr.get('onlymissing',0);
getLenText = int(inAttr.get('getlentext',0));
textColLog = "textColLog.txt"
logFile = open(textColLog,'w')
logFile.write("Table\tColumn\tMax Entry Length\n")
logFile.close()
if inAttr['tablelist'] != '':
tempTables = inAttr['tablelist'].split(",")
tables = []
for tempTable in tempTables:
tables.append([tempTable,''])
else:
sql = "SHOW TABLES FROM "+dbname
cursor.execute(sql)
tables = cursor.fetchall()
outStr += "<?xml version='1.0' encoding='UTF-8' ?>\n"
outStr += "<database name='"+dbname+"' server='"+self.props['db_host']+"' date='"+strftime("%Y-%m-%d")+"' tables='"+str(len(tables))+"' >\n";
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return
try:
cursorDict = conn.cursor(MySQLdb.cursors.DictCursor)
fieldStr = ''
fieldList = ''
for table in tables:
# If table is listed in the excludelist, skip it
if table[0] in inAttr['excludelist']:
continue
colsCount = 0
# Pause between table reads if executing on production server so resources aren't monopolized
if pauseBetween:
print "(pause)",
time.sleep(float(pauseBetween))
self.report("Summarizing table:"+table[0],1)
sql = "SHOW FIELDS FROM "+table[0]
cursorDict.execute(sql)
cols = cursorDict.fetchall()
# TODO: Create list of fields for CRC routine
fieldStr = ''
fieldList = ''
textMaxList = ''
hasPrimaryKey = 0
for col in cols:
colType = col['Type'].replace("'",'"')
if fieldList != '':
fieldList += ','
fieldList += '`' + str(col['Field']) + '`'
colsCount += 1
priKey = '0'
if col['Key']=="PRI":
priKey = '1'
hasPrimaryKey = 1
fieldStr += "\t\t<field name='"+str(col['Field'])+"' type='"+str(colType)+"' primary='"+priKey+"' null='"+str(col['Null'])+"' key='"+str(col['Key'])+"' default='"+str(col['Default'])+"' extra='"+str(col['Extra'])+"' />\n"
if str(colType)=='text' and getLenText==1:
sql = "select max(length(`"+str(col['Field'])+"`)) as maxlen FROM "+table[0]
try:
cursorDict.execute(sql)
maxlen = cursorDict.fetchall()
except:
print "Failed with query:"+sql
try:
textMaxList += "\t"+str(col['Field'])+"\t"+str(maxlen[0]['maxlen'])+"\n"
except:
pass
#print "Text field: "+str(col['Field'])+" Max Entry Length: "+str(maxlen[0]['maxlen'])
if len(textMaxList)>0:
logFile = open(textColLog,'a')
logFile.write("\n"+table[0]+"\n")
logFile.write(textMaxList)
logFile.close()
sql = "SHOW INDEXES FROM "+table[0]
cursorDict.execute(sql)
indexes = cursorDict.fetchall()
indexStr = ''
indexCount = len(indexes)
hasPrimaryIndex = 0
for curIndex in indexes:
if str(curIndex['Key_name']) == 'PRIMARY':
hasPrimaryIndex = 1
indexStr += "\t\t<index keyname='"+str(curIndex['Key_name'])+"' colname='" +str(curIndex['Column_name'])
indexStr += "' collation='"+str(curIndex['Collation'])+"' null='"+str(curIndex['Null'])+"' comment='"+str(curIndex['Comment'])
indexStr += "' sequence='"+str(curIndex['Seq_in_index'])+"' non_unique='"+str(curIndex['Non_unique'])+"' />\n";
if checksum:
tableName = table[0]
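# Rolling checksum trick: MySQL user variables accumulate an MD5 over the MD5 of each row's
# concatenated fields, so a single query yields a table checksum plus a row count without
# transferring any row data over the connection.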
sql = "set @checksum := '', @rowCount := 0 "
cursorDict.execute(sql)
sql = "select min(least(length(@checksum := md5(concat( @checksum, md5(concat_ws('|', "+fieldList+" ))))), @rowCount := @rowCount + 1)) as beNull from "+tableName #+" use index(PRIMARY) "
try:
cursorDict.execute(sql)
except:
print "\nWarning: Table "+tableName+" has no PRIMARY index\n"
sql = "select @checksum crc, @rowCount rows"
cursorDict.execute(sql)
checksumInfo = cursorDict.fetchall()
checksumRows = checksumInfo[0]['rows']
checksumVal = checksumInfo[0]['crc']
else:
checksumRows = '0'
checksumVal = ''
emptyTable = " empty='0' "
if(int(checksumRows)==0):
emptyTable = "\n\t\t empty='1' "
if (onlymissing and (hasPrimaryKey==0 or hasPrimaryIndex==0 or indexCount==0)) or onlymissing==False:
outStr += "\t<table name='"+table[0]+"' rows='"+str(checksumRows)+"' checksum='"+checksumVal+"' cols='"+str(colsCount)+"' "
outStr += " indexes='" + str(indexCount) + "' " + "' hasprimarykey='" + str(hasPrimaryKey) + "' hasprimaryindex='" + str(hasPrimaryIndex) + "' "
outStr += emptyTable+" >\n";
outStr += fieldStr
outStr += indexStr
outStr += "\t</table>\n" ;
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return
outStr += "\n</database>"
cursor.close()
conn.close()
if self.cmdOptions.outfile or ('output' in inAttr):
if self.cmdOptions.outfile:
outFilename = self.cmdOptions.outfile
else:
outFilename = inAttr['output']
try:
f=open(outFilename,'w')
f.write(outStr)
f.close()
except (IOError, os.error), why:
print "Can't write to %s: %s" % (outFilename,str(why))
else:
print outStr
else:
print "DBSummary requires MySQL library to be installed and none found.\nOn RedHat, try 'yum install mysql-python' or 'yum install MySQL-python'\n"
# Output to screen or files information supplied in <log> tags
def taskLog(self,inAttr):
logFile = inAttr.get('output','')
logFile = self.replaceTags(logFile)
if len(logFile)>0:
self.props['logfile'] = logFile
msg = inAttr.get('msg','')
if len(msg)>0:
msg = self.replaceTags(msg)
includeDate = inAttr.get('date', '')
toFile = inAttr.get('tofile', '')
if(toFile=='0'):
toFile = False
toScreen = inAttr.get('toscreen', '')
if(toScreen=='0'):
toScreen = False
if(len(logFile)==0):
logFile = "tc_log.log"
dateStr = ''
if includeDate != '0':
dateStr = strftime("%y%m%d-%H:%M") + "\t"
logStr = dateStr + msg+"\n"
if(toFile):
try:
f=open(logFile, 'a')
f.write(logStr)
f.close()
except (IOError, os.error), why:
print "Can't write %s to %s: %s" % (logStr,logFile,str(why))
if(toScreen):
print logStr
def taskFTP(self,fileList,ftpURL,username,password):
print "\nStarting ftp to "+ftpURL+"..."
passList = (fileList)
self.ftpArchiveList(passList,ftpURL,username,password)
def taskZip(self,inSrc,inDest,inFile,recurse=True,inLog=False):
if self.props.get('noarchive',''):
print "No archiving, just transfer file."
self.archiveList.append(self.props['noarchive'])
else:
if inFile:
self.curArchiveName = os.path.join(inDest,inFile+"_"+str(self.curZipInc)+".zip")
self.curZip = zipfile.ZipFile(self.curArchiveName,'w') # ,zipfile.ZIP_DEFLATED
self.archiveList.append(self.curArchiveName)
if self.testMode:
print "zipping:"+inSrc+" recurse:"+str(recurse)+" to:"+self.curArchiveName
self.processDir(inSrc,recurse,inLog)
if self.props['archiveFile']:
self.curZip.close()
def taskSCP(self,inAttr):
if sys.platform == "darwin":
scpName = "scp"
elif sys.platform == "win32":
scpName = "pscp"
elif sys.platform.find("linux") != -1:
scpName = "scp"
else:
scpName = "scp"
portStr = "10018"
srcPath = "add_to_favorites.php"
destUser = "root"
destURL = "stage.apmmusic.com"
destPath = "/var/www/html/testapm/myapm/"
sendStr = scpName + " -P " + portStr + " " + srcPath + " " + destUser + "@" + destURL + ":" + destPath
print sendStr
srcPath = "/var/www/html/testapm/myapm/add_to_favorites.php"
destPath = "add_to_favorites.php"
receiveStr = scpName + " -P " + portStr + " " + destUser + "@" + destURL + ":" + srcPath + " " + destPath
print receiveStr
def mysqlConnect(self):
try:
conn = MySQLdb.connect (host = self.props['db_host'], user = self.props['db_username'],passwd = self.props['db_password'], db = self.props['db_name'],port=int(self.props['db_port']))
self.mysqlconn = conn
return conn
except MySQLdb.Error, e:
print "MySQL Error %d: %s" % (e.args[0], e.args[1])
return False
def printRow(self,inRow,fields=None):
outStr = ''
for i in fields:
outStr += str(inRow[i])+"\t"
return outStr
# Perform MySQL tasks
def taskMySQL(self,inAttr=None):
action = self.props['action']
if action=='dump':
# Dump the MySQL database
self.taskDBDump(inAttr)
elif action=='import':
self.taskDBImport(inAttr)
elif action=='profile':
# Perform a timing profile of SQL statements
self.taskDBProfile(inAttr)
elif action=='query':
self.taskDBQuery(inAttr)
elif action=='importfiles':
# Import a directory full of .sql files and add permissions to specified user
self.taskDBImportFiles(inAttr)
elif action=='load':
print "Loading database:"+self.props['db_name']
pathMySQL = os.path.join(self.props['path_mysql'],'mysql')
loadFile = os.path.join('',self.props['srcPath'])
dbSelect = inAttr.get('db_name','')
if dbSelect:
dbSelect = ' -D '+dbSelect
cmdStr = os.path.normpath(pathMySQL)+" -h "+self.props['db_host']+" -u "+self.props['db_username']+" -p"+self.props['db_password']+" -P"+str(self.props['db_port'])+dbSelect+" < "+loadFile
#print cmdStr
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
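# Wrap MySQL's SHOW PROFILES: type='begin' clears the query cache, flushes tables and turns
# profiling on; type='end' turns it off and writes per-query stage timings to the 'output'
# file. Requires MySQL Community 5.0.37 or above (error 1193 is reported otherwise).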
def taskDBProfile(self,inAttr=None):
profileAction = inAttr.get('type','')
if 'mysql' in imports:
if not self.mysqlconn:
conn = self.mysqlConnect()
if conn == False:
return False
else:
conn = self.mysqlconn
cursorDict = conn.cursor(MySQLdb.cursors.DictCursor)
if profileAction == 'begin':
self.report('Beginning profile...')
# Clear the caches so the profile execution is accurate
sql = "RESET QUERY CACHE;"
cursorDict.execute(sql)
sql = "FLUSH TABLES;"
cursorDict.execute(sql)
try:
sql = "SET PROFILING=1;"
cursorDict.execute(sql)
except MySQLdb.Error, e:
if e.args[0]==1193:
print "\n\nERROR: Could not activate MySQL Profiling system. Profiling requires Community Edition MySQL 5.0.37 or above.\n"
else:
print "\nMySQL error %d: %s" % (e.args[0], e.args[1])
return False
# Find the last query number already in the profile table
sql = "SHOW PROFILES;"
cursorDict.execute(sql)
tempResult = cursorDict.fetchall()
if not len(tempResult):
self.props['profile_begin_id'] = 1
else:
self.props['profile_begin_id'] = int(len(tempResult))+1
self.report("Starting profile at query id:"+str(self.props['profile_begin_id']))
elif profileAction == 'end':
if not self.mysqlconn:
conn = self.mysqlConnect()
if conn == False:
return False
else:
conn = self.mysqlconn
cursorDict = conn.cursor(MySQLdb.cursors.DictCursor)
try:
sql = "SET PROFILING=0;"
cursorDict.execute(sql)
except MySQLdb.Error, e:
if e.args[0]==1193:
print "\n\nERROR: Could not activate MySQL Profiling system. Profiling requires Community Edition MySQL 5.0.37 or above.\n"
else:
print "\nMySQL error %d: %s" % (e.args[0], e.args[1])
return False
sql = "SHOW PROFILES;"
cursorDict.execute(sql)
tempResult = cursorDict.fetchall()
if len(tempResult)>0:
queries = []
# Get information about each profile
for i in range(self.props['profile_begin_id']-1,len(tempResult)):
queries.append(tempResult[i])
curQuery = tempResult[len(tempResult)-1]['Query_ID']
self.props['profile_end_id'] = int(curQuery)
# Get itemized profile information
for query in queries:
sql = "SHOW PROFILE FOR QUERY "+str(query['Query_ID'])
cursorDict.execute(sql)
tempResult = cursorDict.fetchall()
query['items'] = tempResult
print 'Profile complete for '+str(len(queries))+' queries.'
outStr = ''
for query in queries:
outStr += "Query: "+query['Query']+"\n"
curDuration = 0
for entry in query['items']:
outStr += "\t"+self.printRow(entry,['Status', 'Duration'])+"\n"
curDuration += float(entry['Duration'])
outStr += "\t\tQuery total: "+str(curDuration)+"\n"
profileFile = inAttr.get('output','profileOut.txt')
try:
f = open(profileFile,'w')
f.write(outStr)
f.close()
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (src, dest, str(why))
else:
print "need mysql-python"
def taskDBQuery(self,inAttr=None):
self.report('Querying: '+self.props['msg'])
if 'mysql' in imports:
if not self.mysqlconn:
try:
conn = MySQLdb.connect (host = self.props['db_host'], user = self.props['db_username'],passwd = self.props['db_password'], db = self.props['db_name'])
self.mysqlconn = conn
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return False
else:
conn = self.mysqlconn
cursorDict = conn.cursor(MySQLdb.cursors.DictCursor)
sql = inAttr.get('query','')
#print sql
if len(sql)>0:
try:
cursorDict.execute(sql)
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return
tables = cursorDict.fetchall()
self.report("Number of records:"+str(len(tables)),1)
else:
print "Empty query."
else:
# No Mysql-Python library, execute from the command line
rows = []
pathMySQL = os.path.normpath(os.path.join(self.props['path_mysql'],'mysql'))
queryCmd = pathMySQL+" -h "+self.props['db_host']+" -u "+self.props['db_username']+" -p"+self.props['db_password']+' --execute="'+self.props['query']+'" '+self.props['db_name']
for row in os.popen(queryCmd).readlines(): # run find command
rows.append(row)
return rows
def taskDBImportFiles(self,inAttr=None):
processFiles = inAttr.get('processfiles','0')
srcPath = self.props['srcPath'];
print 'MySQL import from '+srcPath+'...'
fileTotal = 0
for filename in os.listdir(os.path.abspath(srcPath)):
path = os.path.join(srcPath, filename)
if not os.path.isfile(path):
continue
if filename[-3:] == "sql":
fileTotal += 1
if processFiles=='1' and ('mysql' in imports):
sqlFile = os.path.join(os.path.abspath(srcPath),filename)
sqlText = ''
try:
f = open(sqlFile,'r')
sqlText = f.read()
f.close()
except:
print "Error opening file:"+sqlFile
# Replace all multi-line comments since MySQL often has problems with them
sqlText = re.sub("(/\*([^|]*?)\*/)"," ",sqlText)
# Replace tags
sqlText = self.replaceTags(sqlText)
print "processing file:"+sqlFile
self.taskDBQuery({'query':sqlText})
#print sqlText
else:
os.chdir(os.path.abspath(srcPath))
pathMySQL = os.path.normpath(os.path.join(self.props['path_mysql'],'mysql'))
importCmd = pathMySQL + " -h " + self.props['db_host'] + " -u " + self.props['db_username'] + " -p" + self.props['db_password'] + " --database=" + self.props['db_name'] + " < " + filename
#print importCmd
returnArray = self.doExec(importCmd)
print ''.join(returnArray)
#os.system(importCmd)
if filename[0:2] == "sp" or filename[0:2] == "fn":
spName = filename[:-4]
grantSQL = "GRANT EXECUTE ON PROCEDURE "+self.props['db_name']+"."+spName+" TO '"+self.props['sp_user']+"'@'localhost';"
grantCmd = pathMySQL+" -h "+self.props['db_host']+" -u "+self.props['db_username']+" -p"+self.props['db_password']+' --execute="'+grantSQL+'" '
os.system(grantCmd)
grantSQL = "GRANT EXECUTE ON PROCEDURE "+self.props['db_name']+"."+spName+" TO '"+self.props['sp_user']+"'@'%';"
grantCmd = pathMySQL+" -h "+self.props['db_host']+" -u "+self.props['db_username']+" -p"+self.props['db_password']+' --execute="'+grantSQL+'" '
os.system(grantCmd)
self.report("Import: "+importCmd)
def taskDBDump(self,inAttr=None):
dumpTables = inAttr.get('tables','')
dumpTables = dumpTables.replace("\n"," ")
print "Dumping:"+self.props['db_name'] + " " + dumpTables
pipeDir = ''
# Set default attributes
if len(inAttr.get('nodata',''))>0:
noData = " --no-data "
else:
noData = ""
if len(inAttr.get('locktables',''))>0:
skipLocks = ""
else:
skipLocks = " --skip-add-locks "
if len(inAttr.get('extendedinsert',''))>0:
extInsert = ""
else:
extInsert = " --extended-insert "
if len(inAttr.get('compress',''))>0:
compressSend = ""
else:
compressSend = " --compress "
tabDir = inAttr.get('tabdir','')
if len(dumpTables)>0:
dumpTables = " --tables " + dumpTables
if len(tabDir)>0:
tabDir = ' --tab="' + tabDir + '"'
destFile = self.props.get('dest_file','')
if len(destFile)>0:
dumpFile = " > " + os.path.join(self.props['destPath'],destFile)
# Can't do both
tabDir = ''
else:
dumpFile = ''
destDB = self.props.get('dest_db_name','')
# If this is a pipe transfer, eliminate tab and dump
if(len(destDB)>0):
tabDir = ''
dumpFile = ''
pathMySQL = 'mysql' #os.path.normpath(os.path.join(self.props['path_mysql'],'mysql'))
destHost = self.props.get('dest_db_host','')
destUsername = self.props.get('dest_db_username','')
destPassword = self.props.get('dest_db_password','')
destPort = self.props.get('dest_db_port','')
destSocket = self.props.get('dest_db_socket','')
if destHost:
destHost = " -h " + destHost
if destUsername:
destUsername = " -u " + destUsername
if destPassword:
destPassword = " -p" + destPassword
if destPort:
destPort = ' -P'+destPort+' '
if destSocket:
destSocket = ' --socket='+destSocket+' '
pipeDir = " | "+pathMySQL+" "+destHost+destUsername+destPassword+destPort+destSocket+" "+destDB
socketStr = self.props.get('db_socket','')
if socketStr:
socketStr = " --socket="+socketStr+' '
pathMySQLDump = os.path.normpath(os.path.join(self.props['path_mysql'],'mysqldump'))
cmdStr = pathMySQLDump+" -h "+self.props['db_host']+" -u "+self.props['db_username']+" -p"+self.props['db_password']+" -P"+str(self.props['db_port'])+noData+socketStr+" --databases "+self.props['db_name']+" "+dumpTables+dumpFile+extInsert+skipLocks+compressSend+tabDir+pipeDir
print "\n"+cmdStr+"\n"
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
def taskDBImport(self,inAttr=None):
importFiles = inAttr.get('path','')
print "Importing:"+self.props['db_name'] + " " + importFiles
if True:
destHost = self.props.get('dest_db_host','')
destUsername = self.props.get('dest_db_username','')
destPassword = self.props.get('dest_db_password','')
destPort = self.props.get('dest_db_port','')
destSocket = self.props.get('dest_db_socket','')
if destHost:
destHost = " -h " + destHost
if destUsername:
destUsername = " -u " + destUsername
if destPassword:
destPassword = " -p" + destPassword
if destPort:
destPort = ' -P'+destPort
if destSocket:
destSocket = ' --socket='+destSocket
importFiles = " " +os.path.join(self.props['destPath'],importFiles)
pathMySQL = os.path.join(self.props['path_mysql'],'mysqlimport')
cmdStr = os.path.normpath(pathMySQL)+destHost+destUsername+destPassword+destPort+destSocket+" "+self.props['dest_db_name']+" "+importFiles
#print cmdStr
returnArray = self.doExec(cmdStr)
print ''.join(returnArray)
# ----------------------------- End Tasks -----------------------------
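# Add one file to the current zip archive. Extensions on the exclusion list are stored
# uncompressed; once the archive passes the size limit (~600 MB) it is closed and a new
# numbered archive is started. Returns a CRC32 of the file name.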
def processFile(self,inDir,inFile):
if self.testMode:
pass
#print "Processing file:"+inDir+inFile
if self.props.get('archiveFile',''):
if self.testExtension(inFile):
zipType = zipfile.ZIP_STORED
else:
zipType = zipfile.ZIP_DEFLATED
#self.curZip = zipfile.ZipFile(archivePath+archiveFile+"_"+str(self.curZipInc)+".zip",'w')
archiveSize = os.stat(self.curArchiveName).st_size
oneMeg = 1000000
oneGig = 1000 * oneMeg
fileLimit = 600 * oneMeg
if archiveSize - fileLimit > 1:
self.curZip.close()
self.curZipInc += 1
self.curArchiveName = os.path.join(self.props['destPath'],self.props['archiveFile']+"_"+str(self.curZipInc)+".zip")
self.curZip = zipfile.ZipFile(self.curArchiveName,'w',zipfile.ZIP_DEFLATED)
self.archiveList.append(self.curArchiveName)
print archiveSize,
if self.testMode:
print "Zipping file:"+os.path.join(inDir,inFile)
else:
try:
inDir = inDir.encode('ascii','ignore')
inFile = inFile.encode('ascii','ignore')
self.curZip.write(os.path.join(inDir,inFile),None,zipType)
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (os.path.join(inDir,inFile), '', str(why))
return zlib.crc32(inFile)
def processDir(self,inDir,recurse=False,inLog=False):
self.returnStr = ""
inDir = os.path.abspath(inDir)
i = 0
if recurse:
if self.testMode:
print "Beginning recurse of:"+inDir
for curPath,dirs,files in os.walk(inDir):
for inName in files:
result = self.processFile(curPath,inName)
i += 1
if result:
if(inLog):
inLog.write(result)
inLog.flush()
else:
self.returnStr += "" #result
self.report(os.path.join(curPath,inName),1)
#self.report(" Files:"+str(i)+"\n")
#if (i % 40)==0:
# print " Files:"+str(i)+"\n"
#self.returnStr = ""
#if inName[-3:] == "php":
# processFile(inName,curPath,walkLog)
#print inName
#if inName[-2:] == "as":
# processFile(inName,curPath,walkLog)
#g.es(inName+"\t"+curPath+"\t"+str(fSize)+"\t"+str(fLines)+"\t")
if self.ignoreSVN:
if '.svn' in dirs:
dirs.remove('.svn') # don't visit SVN metadata directories
#walkLog.close()
return True
def startCopy(self,inDir,recurse,inLog=False):
print "Starting copy..."
#elif(self.cmdOptions.copydir):
#self.copyFiles(self.cmdOptions.copydir, self.cmdOptions.destination)
#else:
if self.cmdOptions.noarchive:
print "No archiving, just transfer file:"+self.cmdOptions.noarchive
self.archiveList.append(self.cmdOptions.noarchive)
else:
# Resolve the archive destination and name from props, the same way processFile does
archivePath = self.props.get('destPath','')
archiveFile = self.props.get('archiveFile','')
if archiveFile:
self.curArchiveName = os.path.join(archivePath,archiveFile+"_"+str(self.curZipInc)+".zip")
self.curZip = zipfile.ZipFile(self.curArchiveName,'w') # ,zipfile.ZIP_DEFLATED
self.archiveList.append(self.curArchiveName)
self.processDir(inDir,recurse,inLog)
if archiveFile:
self.curZip.close()
if self.cmdOptions.ftpdest:
print "\nStarting ftp to "+self.cmdOptions.ftpdest+"..."
# Send the most recently created archive to the FTP destination
self.ftpArchiveList(self.archiveList[-1],self.cmdOptions.ftpdest,"root","mypassword")
# TODO: Activate the command line Task
if self.cmdOptions.ftpdest == 'task':
curType = 'svn'
print "Copy complete."
def createTargetList(self,xmlDOM):
targets2 = xmlDOM.getElementsByTagName("target")
for target in targets2:
curType=target.localName
if curType:
targetName = target.getAttribute('name')
self.targetList[targetName] = target
self.tags['availabletargets'] = ", ".join(self.targetList.keys())
def getAttr(self,inNode,attrName,defaultVal=''):
try:
if inNode.hasAttribute(attrName):
return inNode.getAttribute(attrName)
else:
return defaultVal
except:
return ''
# Execute each command in the XML batch script
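# A batch script is plain XML; a minimal, illustrative example of the elements handled below:
#   <project default="backup">
#     <target name="backup">
#       <property name="db_name" value="mysite" />
#       <mysql action="dump" dest="./archives" dest_file="mysite_dump.sql" />
#       <zip src="./site" dest="./archives" archiveFile="site_backup" />
#       <log msg="Backup finished" tofile="1" />
#     </target>
#   </project>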
def executeScript(self,xmlDOM,parentName='project',targetDefault=''):
#print "Starting executeScript"
if parentName=='project':
execStartTime = strftime("%H:%M")
#targetParent = xmlDOM.getElementsByTagName(parentName).item(0)
#print xmlDOM.getElementsByTagName(parentName)
if len(targetDefault)==0:
targetDefault = self.getAttr(xmlDOM,'default','')
# Check if there is a target that needs to be executed before this one
execBeforeStr = self.getAttr(xmlDOM,'depends','')
execBeforeStr = self.getAttr(xmlDOM,'execbefore',execBeforeStr)
execBeforeStr = self.replaceTags(execBeforeStr)
if(len(execBeforeStr)>0):
execBeforeArray = execBeforeStr.split(',')
for execBefore in execBeforeArray:
if(len(execBefore)>0 and execBefore in self.targetList):
# If new target has a default, set it for the recursive call
defaultInTarget = self.getAttr(self.targetList[execBefore],'default','')
# Check the other names that this attribute might be under
defaultInTarget = self.getAttr(self.targetList[execBefore],'execafter',defaultInTarget)
#print "Executing before:"+execBefore
defaultInTarget = self.replaceTags(defaultInTarget)
self.executeScript(self.targetList[execBefore],'target',defaultInTarget)
else:
print "Could not find target:"+execBefore
#targetDefault = xmlDOM.getElementsByTagName(parentName).item(0).getAttribute('default')
targets = xmlDOM.childNodes
finished = False
for target in targets:
curType=target.localName
if curType:
# If item has enabled attribute set to zero, move to the next one
if target.getAttribute('enabled') != '0' and not finished:
# Create a dictionary of the current XML attributes
attrList = {}
if target.attributes:
attrList = {}
for attr in target.attributes.keys():
attrList[attr]=target.attributes[attr].value
# Set common attributes
if target.getAttribute('src'):
self.props['srcPath'] = self.replaceTags(str(target.getAttribute('src')))
if target.getAttribute('dest'):
self.props['destPath'] = self.replaceTags(str(target.getAttribute('dest')))
if curType == 'copy':
if self.testMode:
print self.curSpaces+"Copy src:"+target.getAttribute('src')+" dest:"+target.getAttribute('dest')
else:
self.taskCopy({'src':target.getAttribute('src'),'dest':target.getAttribute('dest')})
elif curType == 'ftp':
#self.props['srcPath'] = self.replaceTags(str(self.props['srcPath']))
if self.testMode:
print self.curSpaces+"ftp src:"+target.getAttribute('src')+" dest:"+target.getAttribute('dest')
self.taskFTP(self.props['srcPath'],target.getAttribute('dest'),target.getAttribute('username'),target.getAttribute('password'))
elif curType == 'zip':
self.props['archiveFile'] = self.replaceTags(str(target.getAttribute('archiveFile')))
if self.testMode:
print self.curSpaces+"zip src:"+target.getAttribute('src')+" dest:"+target.getAttribute('dest')+" archiveFile:"+target.getAttribute('archiveFile')
self.taskZip(self.props['srcPath'],self.props['destPath'],self.props['archiveFile'])
elif curType == 'pause':
if self.testMode:
print self.curSpaces+"pause"
a = raw_input("Press the ENTER key to continue...")
print a
elif curType == 'input':
if self.testMode:
print self.curSpaces+"pause"
else:
msg = "Do you want to continue(y)?"
if target.getAttribute('msg'):
msg = target.getAttribute('msg')
answer = raw_input(msg)
if answer != "y":
print "Operation aborted."
finished = True
elif curType == 'exec':
if self.testMode:
print self.curSpaces+"exec"
self.taskExec(target.attributes)
elif curType == 'crontab':
if self.testMode:
print self.curSpaces+"crontab"
self.taskCrontab(attrList)
elif curType == 'joomla':
if self.testMode:
print self.curSpaces+"joomla"
self.taskJoomla(attrList)
elif curType == 'mysql' or curType == 'dbsummary' :
self.props['action'] = str(target.getAttribute('action'))
if target.getAttribute('db_name'):
self.props['db_name'] = str(target.getAttribute('db_name'))
if target.getAttribute('db_host'):
self.props['db_host'] = str(target.getAttribute('db_host'))
if target.getAttribute('db_username'):
self.props['db_username'] = str(target.getAttribute('db_username'))
if target.getAttribute('db_password'):
self.props['db_password'] = str(target.getAttribute('db_password'))
if target.getAttribute('db_port'):
self.props['db_port'] = str(target.getAttribute('db_port'))
if target.getAttribute('db_socket'):
self.props['db_socket'] = str(target.getAttribute('db_socket'))
if target.getAttribute('sp_user'):
self.props['sp_user'] = str(target.getAttribute('sp_user'))
if target.getAttribute('dest_file'):
self.props['dest_file'] = self.replaceTags(str(target.getAttribute('dest_file')))
self.props['query'] = str(target.getAttribute('query'))
self.props['msg'] = str(target.getAttribute('msg'))
if self.testMode:
print self.curSpaces+curType
else:
if curType == 'mysql':
self.taskMySQL(attrList)
else:
self.taskDBSummary(attrList)
elif curType == 'log':
if self.testMode:
print self.curSpaces+"log"
else:
self.taskLog(attrList)
elif curType == 'email':
if self.testMode:
print self.curSpaces+"email"
else:
self.taskEmail(attrList)
elif curType == 'svn':
if self.testMode:
print self.curSpaces+"svn"
else:
self.taskSVN(attrList)
elif curType == 'workingdir':
if self.testMode:
print self.curSpaces+curType
else:
self.taskWorkingDir(attrList)
elif curType == 'mkdir':
if self.testMode:
print self.curSpaces+curType
else:
self.taskMkDir(attrList)
elif curType == 'property':
if target.getAttribute('name'):
#print self.curSpaces+"set property '" + target.getAttribute('name') + "' to " + target.getAttribute('value')
self.props[target.getAttribute('name')] = self.replaceTags(target.getAttribute('value'))
elif curType == 'tag':
if target.getAttribute('name'):
tempTagVal = ''
if target.getAttribute('type') == 'input':
tempTagVal = raw_input("Enter a value for the tag "+target.getAttribute('name')+"(default:"+target.getAttribute('value')+"):")
if len(tempTagVal)==0:
tempTagVal = target.getAttribute('value')
tempTagVal = self.replaceTags(tempTagVal)
print self.curSpaces+"set tag '" + target.getAttribute('name') + "' to " + tempTagVal
self.tags[target.getAttribute('name')] = tempTagVal
if(len(targetDefault)>0):
if(targetDefault in self.targetList):
# If target uses tags, replace them
targetDefault = self.replaceTags(targetDefault)
# If new target has a default, set it for the recursive call
defaultInTarget = self.getAttr(self.targetList[targetDefault],'default','')
# Check the other names that this attribute might be under
defaultInTarget = self.getAttr(self.targetList[targetDefault],'execafter',defaultInTarget)
defaultInTarget = self.replaceTags(defaultInTarget)
#print "Executing after:"+targetDefault
self.executeScript(self.targetList[targetDefault],'target',defaultInTarget)
else:
print "Could not find target:"+targetDefault
if not self.props['quietmode']:
if parentName=='project':
print "Script execute complete. ST:"+execStartTime+" ET:"+strftime("%H:%M")
# Parse the XML batch file
def loadScript(self,fName):
f = open(fName,'r')
xmlStr = f.read()
f.close()
return xml.dom.minidom.parseString(xmlStr)
# Process the command line arguments
def processArgs(self,options,args):
if self.cmdOptions.recursive:
self.props['recursive'] = 1
if options.quietmode:
self.props['quietmode']=1
if self.props['ignoresvndir']:
if not self.props['quietmode']:
self.report("Ignoring SVN folders.")
if options.testmode:
self.testMode = True
if not self.props['quietmode']:
print "*** Test Mode is on ***"
# Setup install as a Linux alias
def install():
pass
# Register a command for access from the command line
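# cmdArgs format: [handlerMethod, ['argName','default'], ...]; positional command-line
# arguments fill the named slots in order (see controller).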
def registerCommand(self,cmdName, cmdArgs):
# Set the method that will be called when command is requested
#cmdArgs['cmdMethod'] = cmdMethod
self.commandList[cmdName] = cmdArgs
def registerCoreCommands(self):
self.addExample('',"python todocopy.py","Execute Todo Copy")
self.addExample('',"python todocopy.py example_exec.xml","Execute Todo Copy XML script/macro")
self.addExample('',"python todocopy.py example_exec.xml target1","Execute target1 in XML script")
self.registerCommand('copy',[self.taskCopy,['src',''],['dest','']])
self.addExample('copy', "todocopy.py copy ./ ../yhw_production","Copy files from -> to")
self.registerCommand('copylist',[self.taskCopyList,['filelist',''],['dest','']])
self.addExample('copylist',"todocopy.py copylist filelist.txt ../yhw_production","Copy all files in list with automatic path creation")
self.registerCommand('createlist',[self.taskCreateList,['src',''],['dest',''],['type','newline']])
self.addExample('createlist',"todocopy.py createlist -r .","Display a list of all files (recursive) in the current dir")
self.addExample('createlist',"todocopy.py createlist -r . filelist.txt","Output a list of all files (recursive) in the current dir")
self.registerCommand('ftplist',[self.taskFTPList,['filelist',''],['dest','']])
self.addExample('ftp',"todocopy.py -l filelist.txt --ftp 205.107.10.199","FTP ")
self.registerCommand('workingdir',[self.taskWorkingDir,['value','./']])
self.registerCommand('dbsummary',[self.taskDBSummary,['tablelist','']])
self.addExample('dbsummary',"todocopy.py dbsummary -o dbsumm.xml","Generate database summary and output to .xml file")
self.addExample('dbsummary',"todocopy.py dbsummary tbvenue,tbtasktype -o db.xml","Generate database summary of 2 tables and output to .xml file")
self.registerCommand('svn',[self.taskSVN,['action',''],['dest','']])
self.registerCommand('svndir',[self.taskSVNDirList,['action',''],['dest','']])
self.registerCommand('examples',[self.taskExamples,['type','']])
self.registerCommand('setpath',[self.taskSetPath,['path','']])
self.addExample('setpath',"todocopy.py setpath","Adds the cwd to current path -- useful for executing TC from any folder")
self.registerCommand('md5',[self.taskMD5,['source','']])
self.addExample('md5',"todocopy.py md5 myplaintext","Returns an MD5 value of the passed plaintext")
self.registerCommand('sha1',[self.taskSHA1,['source','']])
self.addExample('sha1',"todocopy.py sha1 myplaintext","Returns an SHA1 value of the passed plaintext")
self.registerCommand('joomla',[self.taskJoomla,['action',''],['src','']])
self.registerCommand('findabove',[self.taskFindAbove,['src',''],['filename','']])
# TODO: Replace these placeholders with real commands
self.registerCommand('scp',[self.taskSCP,['action',''],['dest','']])
self.registerCommand('email',[self.taskFTPList,['filelist',''],['dest','']])
self.registerCommand('log',[self.taskFTPList,['filelist',''],['dest','']])
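# Dispatch order: with no arguments an autorun script (e.g. tc_autorun.xml) is tried; a first
# argument ending in .xml runs that batch script (optionally a named target); a registered
# command name runs that command with positional args; otherwise two or more args fall
# through to an implicit copy.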
def controller(self,options,args):
usage = " USAGE --> python todocopy.py command|xmlfile [options] arg1 arg2\n"
usage += " For options list, type: python todocopy.py --help"
self.registerCoreCommands()
self.props['quietmode']=False
self.ignoreSVN = True
self.cmdOptions = options
self.cmdArgs = args
# Reset database connection
self.mysqlconn = None
autorunList = ["tc_autorun.xml","Makefile"]
self.processArgs(options,args)
if not self.props['quietmode']:
print "--- Todocopy by Dan Rahmel, revision #"+self.getRev()+" --- "
if len(args)==0:
print "------> For examples, execute: python todocopy.py examples\n"
# Check to make sure some switches or arguments were passed into the app -- there is always 1, the path of the script
if len(sys.argv)<2:
for autorunFName in autorunList:
# Try to execute an autorun script
if os.path.isfile(autorunFName):
print "Executing autorun..."
tempDOM = self.loadScript(autorunFName)
self.createTargetList(tempDOM)
self.executeScript(tempDOM)
return
# Check for a TC script file
elif args[0][-3:].lower()=="xml":
# Load the XML into a DOM
tempDOM = self.loadScript(args[0])
# Harvest references to all targets in DOM
self.createTargetList(tempDOM)
# Get the primary project DOM
projectDOM = tempDOM.getElementsByTagName('project').item(0)
# If a target is specified, pass that to the execution instead of running the default
targetDefault = ''
if len(args)>1:
targetDefault = args[1]
self.executeScript(projectDOM,'project',targetDefault)
return
elif args[0] in self.commandList:
argList = {}
for i in range(1,len(self.commandList[args[0]])):
tempVal = self.commandList[args[0]][i][1]
# ignore the command itself
try:
tempVal = args[i]
except:
pass
argList[self.commandList[args[0]][i][0]] = tempVal
self.commandList[args[0]][0](argList)
return
else:
srcDir = "c:/TestCompress/"
archivePath = "c:/"
archiveFile = "Test_BU022108"
if len(args)==1:
if args[0][-3:].lower()=="xml":
tempDOM = self.loadScript(args[0])
self.createTargetList(tempDOM)
projectDOM = tempDOM.getElementsByTagName('project').item(0)
print projectDOM
self.executeScript(projectDOM)
return
elif len(args)>1:
# Must be an implicit copy
argList = {}
# Start at 1 to ignore the method itself
for i in range(1,len(self.commandList['copy'])):
tempVal = self.commandList['copy'][i][1]
try:
tempVal = args[i-1]
except:
pass
argList[self.commandList['copy'][i][0]] = tempVal
self.commandList['copy'][0](argList)
return
#self.props['srcPath'] = args[0]
#archivePath = args[1]
#self.props['destPath'] = args[1]
#archiveFile = args[2]
#self.props['archiveFile'] = args[2]
print usage
if __name__ == '__main__':
insTodoCopy = todocopy()
usage = " python %prog command|xmlfile [options] arg1 arg2\n"
parser = OptionParser(usage=usage)
parser.add_option("-o", "--outfile", dest="outfile",help="output data to OUTFILE", metavar="OUTFILE")
parser.add_option("-f", "--ftp", dest="ftpdest",help="ftp archive files to FTPDEST", metavar="FTPDEST")
parser.add_option("-n", "--noarchive", dest="noarchive",help="no file archiving", metavar="ARCHIVENONE")
parser.add_option("-l", "--list", dest="copylist",help="INI format file of files/dirs to copy", metavar="FILE")
parser.add_option("-d", "--destination", dest="destination", help="Destination path for operation", metavar="DEST")
parser.add_option("-s", "--source", dest="source", help="Source path for operation", metavar="SRC")
parser.add_option("-c", "--copydir", dest="copydir", help="Source directory of files to copy", metavar="COPYDIR")
parser.add_option("-r", "--recursive",action="store_true", dest="recursive", default=False,help="Perform recursive operation")
parser.add_option("-e", "--test",action="store_true", dest="testmode", default=False,help="Don't actually copy or create files")
parser.add_option("-q", "--quiet",action="store_true", dest="quietmode", default=False,help="Turn off copyright messages")
parser.add_option("-t", "--task", dest="tasktype", help="Task to execute (equivalent to XML tag)", metavar="TASKTYPE")
parser.add_option("-u", "--username", dest="username", help="Set username", metavar="USERNAME")
parser.add_option("-[", "--password", dest="password", help="Set password", metavar="PASSWORD")
(options, args) = parser.parse_args()
insTodoCopy.controller(options,args)
|
drahmel/todocopy
|
todocopy.py
|
Python
|
gpl-2.0
| 83,140
|
[
"VisIt"
] |
1cc04ee9cc8c77b6444fe47472e671a6073093294c2db44f7e10757e6e0d5313
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import *
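# Assumes a local 'models' package exposing base, user, store and product modules (defining
# Base, User, Store and Product respectively); the exact layout is project-specific.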
engine = create_engine('postgresql://catalog:catalog@localhost:5432/catalog')
base.Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create a first user for entering items
user = user.User(name="Chris", email="chris@email.com")
session.add(user)
session.commit()
# Create a first store to associate products with
store = store.Store(name="Virtual Reality Store", user_id=1)
session.add(store)
session.commit()
# Create items to populate the product list
# Item information compiled from Amazon.com and Steampowered.com
item = product.Product(name="Samsung Gear VR",
category="Hardware",
description="""Light weight so you can play and watch more
comfortably. Easy to use touch pad wide field of view, precise
head-tracking and low latency brings reality to the virtual. Be
transported to amazing new worlds, in games, video and images.
Thousands of 360 degree panoramic photos. Compatible with:
Samsung Galaxy S7, S7 edge,Note5,S6 edge+,S6,S6 edge. Improved
fit, including room for most eyeglasses and improved padding for
extra comfort and durability.""",
price="$59.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item2 = product.Product(name="Plustore 3D Virtual Reality Glasses",
category="Hardware",
description="""Revolutionary optical system - it completely
eliminates the feel of vertigo. Fit everyone's eyes: Pupil
settings for the best 3D experience, even to those near-sighted.
Innovative design comfortable wearable - Adjustable straps for
flexible wear, Super Lighter Weight of 156g. Turn your
smartphone into a virtual reality viewer. Enjoy real 360-degree
videos and immersive world of VR from the comfort of your home.
Adaptable - adaptable for adroid and ios smart phones with the
screen size being "4.7-6" inches and pixels over 1280*720.""",
price="$16.88",
store_id=1,
user_id=1)
session.add(item2)
session.commit()
item3 = product.Product(name="SARLAR 3D VR Glasses",
category="Hardware",
description="""TAKE CARE OF YOUR EYES AND PUT ZERO
PRESSURE-Lower eyelid is the weakest part of the eyes. It's
based on human engineering, redesign the position on basis of
regurlar headband to re lieve the load of nose bridge and eyelid
so as to allevate feeling of fatigue. OVERSIZED VISUAL ANGLE
GETS YOU IMMERSIVE-FOV102 panoramic view and the screen is
magnified 5.2 times than before,the super vision will give you
unlimited world, an incredible visual fidelity and immersive
feeling. NO ADJUSTMENTS ARE NEEDED FOR THE MIDDLE LINE OF DOUBLE
SCREENS-The supporting structure for mobile phones in the left
and right together with gear adjustment can perfectly immobilize
the phone.Also,it supports phones with larger size.There is no
need to adjust the position of moble phones after the first
adjustment.The middle line adjustment is so simple!The design is
just humanized and awesome. ASPHERIC LENS DESIGN,FRAMES WILL BE
MORE COMFORTABLE-Aspheric lens design,the frame has no
abnormalities and perfectly fit for the visual habits,no
spinning sensation will generate when wearing.High adjustment no
ghosting. COMPATIBLE WITH ALMOST ALL MOBILE PHONES-Sarlar vr
glasses is with small size but it supports large size phones
will various brands and types,compatibles with almost all mobile
phones.It is suitable for any smart phone which screen size is
from 4.0-6.5",more than that you cannot call it mobile phone,it
is compatible with mobile phones which length doesn't exceed
175mm and width doesn't exceed90mm.""",
price="$19.99",
store_id=1,
user_id=1)
session.add(item3)
session.commit()
item4 = product.Product(name="Cellay 3D VR Goggles",
category="Hardware",
description="""Glasses-free: Without wearing the glasses if your
visual acuity is under 600 degree. IMAX Effect: Anti-distortion
aspheric design, lowering down the distortion and enjoying 3D
IMAX world. Adjustable Distance: the VR virtual reality headset
is able to adjust focal length and pupil distance for different
people. T-shaped Strap: it helps reduce the pressure around your
eyes and is suitable for almost everyone. Compatible with: the
VR helmet fits smartphones such as Apple and Android phones with
screens between 4.0~6.5 inches.""",
price="$33.45",
store_id=1,
user_id=1)
session.add(item4)
session.commit()
item5 = product.Product(name="Google Cardboard",
category="Hardware",
description="""GOOGLE CARDBOARD is the primary experience
version of 3D VR Glasses. It's made from the AAA grade
corrugated paper which is the strongest material. Our product is
the highest quality for this price in the market. HAVING
ADVANCED GOOGLE CARDBOARD according to the advice from customers
and tested it over 80 times. We add the longer head strap,
suction cups and forehead pad. So far, we have sold more than
500,000 sets. COMPATIBLE FOR all the 3.5"- 6.0" smartphones.
Whether your phone system is Android system or other systems,
you can use the TOPMAXION Cardboard to watch Left-right 3D
movies on Video Player and play varieties of VR games. IN ORDER
TO EXPERIENCE HIGH QUALITY 3D FEELING, you'd better use high
resolution smartphones. Experience a truly stunning, engrossing
VR experience with cinematic HD visuals from your smart phone's
screen using the included biconvex lenses offering a 37 mm focal
length for the best visuals! THE PERFECT SOLUTION for Virtual
Reality on a budget! Box-style package with good portability,
easily take anywhere.""",
price="$9.99",
store_id=1,
user_id=1)
session.add(item5)
session.commit()
item = product.Product(name="Oculus Rift",
category="Hardware",
description="""Oculus Rift's advanced display technology
combined with its precise, low-latency constellation tracking
system enables the sensation of presence. Customizable,
comfortable, adaptable, and beautiful, Rift is technology and
design as remarkable as the experiences it enables. Every aspect
of Rift was designed to be easy, inviting, and comfortable to
use - and that extends to the VR environment we've created as a
starting point for your journeys. Discover and download games
across genres ranging from action RPGs, sci-fi shooters,
mind-bending puzzle games, and more - and play them from an
entirely new perspective. Lucky's Tale is included with every
Rift purchase. Windows PC and an internet connection are
required for Oculus Rift - please review recommended system
specs.""",
price="$599.00",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="HTC Vive",
category="Hardware",
description="""Vive is built from the ground up for room-scale
VR, which allows you to physically move around objects in the
virtual space. With more than 500 games and growing for SteamVR,
everything you love about Steam is now available in VR. The
Gallery: Call of the Starseed, Tilt Brush and Zombie Training
Simulator come with Vive for free. An adjustable headset and
multiple eye relief adjustments, including lens distance and
IPD, to make Vive comfortable and clear. Wireless controllers
designed specifically for VR make interactions with objects
natural and intuitive. Enjoy a safe, convenient experience with
Chaperone bounds of your play area, a front-facing camera to
view the real world and notifications from your phone in VR.
Compatible Windows computer and internet connection
required-refer to the recommended computer specs below.""",
price="$799.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Virtual Reality Insider: Guidebook for the VR Industry",
category="Reference",
description="""Virtual reality is as explosive a technology as
the Internet! Are you working in the VR industry, or curious to
find out more about it? VR Insider is an overview and guidebook
for consumer virtual reality. For the industry veteran, it is
the perfect book to stir up new ideas and see how the big
picture fits together. For newcomers to VR, it is the fastest
way to catch up on what is happening and figure out how to apply
your skills. Affordable virtual reality hardware finally exists,
and this book will help you create its content! Best of all,
this book is readable in 1-2 hours!""",
price="$8.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="""Learning Virtual Reality: Developing Immersive
Experiences and Applications for Desktop, Web, and Mobile""",
category="Reference",
description="""As virtual reality approaches mainstream consumer
use, a vibrant development ecosystem has emerged in the past few
years. This hands-on guide takes you through VR development
essentials for desktop, mobile, and browser-based applications.
You'll explore the three go-to platforms-OculusVR, Gear VR, and
Cardboard VR-as well as several VR development environments,
programming tools, and techniques. If you're an experienced
programmer familiar with mobile development, this book will help
you gain a working knowledge of VR development through clear and
simple examples. Once you create a complete application in the
final chapter, you'll have a jumpstart on the next major
entertainment medium.""",
price="$26.01",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="The VR Book: Human-Centered Design for Virtual Reality",
category="Reference",
description="""Without a clear understanding of the human side
of virtual reality (VR), the experience will always fail. The VR
Book bridges this gap by focusing on human-centered design.
Creating compelling VR applications is an incredibly complex
challenge. When done well, these experiences can be brilliant
and pleasurable, but when done badly, they can result in
frustration and sickness. Whereas limitations of technology can
cause bad VR execution, problems are oftentimes caused by a lack
of understanding human perception, interaction, design
principles, and real users. This book focuses on the human
elements of VR, such as how users perceive and intuitively
interact with various forms of reality, causes of VR sickness,
creating useful and pleasing content, and how to design and
iterate upon effective VR applications. This book is not just
for VR designers, it is for managers, programmers, artists,
psychologists, engineers, students, educators, and user
experience professionals. It is for the entire VR team, as
everyone contributing should understand at least the basics of
the many aspects of VR design. The industry is rapidly evolving,
and The VR Book stresses the importance of building prototypes,
gathering feedback, and using adjustable processes to
efficiently iterate towards success. With extensive details on
the most important aspects of VR, more than 600 applicable
guidelines, and over 300 additional references, The VR Book will
bring a strong foundation for anyone and everyone involved in
creating VR experiences.""",
price="$71.96",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="""The Real Reason Facebook Acquired Oculus Rift: How
Virtual Reality Will Disrupt Every Industry and Why You Should Care""",
category="Reference",
description="""What is Facebook's interest in acquiring a gaming
platform for $2 Billion? In this book I make bold predictions
that will be reality within the next five years. Regardless of
what industry you work in today, this will affect you. Learn
the history of virtual reality and the 4 simple steps necessary
for you to profit off of this massively game-changing
technology.""",
price="$9.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="The Lab",
category="Software",
description="""Set in a pocket universe of Aperture Science,
The Lab offers a wide range of ways to enjoy VR, all in one
application. - Slingshot : Begin your career as a
Calibration Trainee by recklessly destroying everything in the
Aperture Storage Annex using the Core Calibration slingshot.
- Longbow : Use your archery skills to defend your noble
castle gate from a rampaging but adorable and equally noble
horde of attackers. - Xortex : Are you a bad enough
dude to become a Xortex ace? Relive the golden era of gaming --
only this time, it's all around you. - Postcards :
Visit exotic, far-off locales from the comfort of your own head.
- Human Medical Scan : Explore the intricate beauty of
the human body through a highly detailed model created from a
series of CT medical scans. - Solar System : Why watch
shows about the vast majesty of space when you can jump in and
see it for yourself? Have educational space-fun while putting
Neil Degrasse-Tyson out of business. - Robot Repair :
Can you repair a robot? Good, because Aperture Science's Human
Diversity Outreach Program is now hiring. - Secret Shop
: The fantasy equivalent of a twenty-four-hour convenience
store is now open for business! Peruse artifacts, shop for
familiars and cast a spell or two at Dota's Secret Shop!""",
price="Free",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Keep Talking and Nobody Explodes",
category="Software",
description="""In Keep Talking and Nobody Explodes, one player
is trapped in a virtual room with a ticking time bomb they must
defuse. The other players are the "Experts" who must give the
instructions to defuse the bomb by deciphering the information
found in the bomb defusal manual. But there's a catch: the
experts can't see the bomb, so everyone will need to talk it
out - fast! Rounds are fast-paced, tense, occasionally silly,
and almost always loud. Everybody has a role to play whether
they are defusing the bomb or not. Swap out between rounds and
share the experience with all of your friends! Puzzle solving
and communication skills - and maybe a few friendships - will
be put to the test as players race to defuse bombs while
communicating quickly, clearly, and effectively.""",
price="$14.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Space Pirate Trainer",
category="Software",
description="""Space Pirate Trainer is the official trainer for
wannabe space pirates on the HTC Vive. Remember those awesome
classic arcade cabinets? Imagine if those were immersive...
Space Pirate Trainer puts you in one of those; fighting off
relentless waves of droids with all the weapons and gadgets you
would ever need as a Space Pirate. You better dodge some of
those incoming lasers though, since just using your shields
won't get you in the top rankings. Pick up your blasters, put
on your sneakers, and dance your way into the Space Pirate
Trainer hall of fame. No real droids were harmed during the
creation of this game. Use your straps and stay in your VR area
to make sure no humans will be harmed.""",
price="$14.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Vanishing Realms",
category="Software",
description="""Vanishing Realms: Rite of Steel is an immersive
Role Playing Game designed from the ground up for Virtual
Reality play. Use one-to-one motion controls and movement so
that you are fully in control of combat. To swing, duck and
block, you don't hit a button, but physically move to perform
these actions as if you were there - because you are! Treasure
chests, weapon shops, a horde of undead foes, mystic writings,
banished gods, lost artifacts, ancient tombs and moonlit forest
- it's all here waiting to be discovered in a beautifully
hand-crafted VR realm.""",
price="$19.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="The Body VR",
category="Software",
description="""The Body VR is an educational virtual reality
experience that takes the user inside the human body. Travel
through the bloodstream and discover how blood cells work to
spread oxygen throughout the body. Enter one of the billions
of living cells inside our body and learn how the organelles
work together to fight deadly viruses.""",
price="Free",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Anyland",
category="Software",
description="""Anyland is an open virtual reality universe to
create your home, chat with others, explore & live in! Made
from the ground up for Vive, and shaped by all of us together.
Anyland is a blank canvas for your imagination with no
predefined stories and themes... it's up to all of us together
to invent the world. Build, script, share, collect, meet
friends, have parties, play games, watch videos, explore areas,
and make history in a new reality! We are two indie devs & VR
enthusiasts and this is our labor of love. We hope to say hi to
you in Anyland! If there's anything you need, let us know
please, and thanks for being in Anyland! """,
price="$11.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Waltz of the Wizard",
category="Software",
description="""Waltz of the Wizard is a virtual reality
experience that lets you feel what it's like to have magical
powers. Combine arcane ingredients into a boiling cauldron with
the help of an ancient spirit trapped in a human skull. Unleash
creative or destructive wizardry upon a fully interactive
virtual world. Travel to new places, finding yourself in
mysterious circumstances full of detail and unforgettable
atmosphere.""",
price="Free",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Portal Stories: VR",
category="Software",
description="""Portal Stories: VR is a brand new mini story set
in the Portal Universe. It features 10 brand new puzzles,
specifically designed to work using the 360 degrees room scale
that SteamVR offers. Inside Aperture, you'll be able to use the
new "Aperture Science Instant Teleportation Device" and the
"Aperture Science Apparatus Retrieval Tool" to solve the new
tests. Powered by Unreal Engine 4, Portal Stories: VR gives the
Aperture Science facility a whole fresh look. With new models,
textures, particles and more! """,
price="Free",
store_id=1,
user_id=1)
session.add(item)
session.commit()
item = product.Product(name="Zombie Training Simulator",
category="Software",
description="""Are you really ready for the zombie apocalypse?
Zombie Training Simulator is the dominant authority and world's
most advanced zombie preparation tool. We train you with real
world weapons, tactics and scenarios to ensure that you are well
equipped when the day comes. Are zombies coming on space ships?
Do zombies possess super powers? Will animals become zombies?
Nope! We've done the research on the most likely zombie
characteristics and are here to make sure you're prepared.
Train and unlock powerful weapons including pistols, shotguns
and machine guns. Learn the incredible zombie stopping power of
each weapon. See how much zombies love fresh meat, sound and
explosives. Combining these tactics will make you a truly
distinguished zombie apocalypse survivor. Play our speed tests
to prepare your wicked accuracy, and try survival mode to
practice surviving the ever-increasing zombie hordes before
it's time to run. Our integrated global leaderboard system
displays your progress and capabilities around the world. Will
you be the first person your friends will call when the day
comes? When the news reports the zombies are here and your
friend gives you a call, "They're here. I need you," how will
you respond? That's right, you'll say "I'll be right over.
We've got this thanks to ZTS!" We hope you're as excited as we
are to prepare the world for the impending zombie invasion. Are
you truly prepared?""",
price="$19.99",
store_id=1,
user_id=1)
session.add(item)
session.commit()
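# Optional sanity check of the seed data (a sketch, assuming the standard
# SQLAlchemy query interface on the models above). Uncomment to verify:
# print("Products in database: %d" % session.query(product.Product).count())  # expect 20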
print "Added products to database!"
|
caasted/aws-flask-catalog-app
|
database_fill.py
|
Python
|
mit
| 21,493
|
[
"Galaxy",
"VisIt"
] |
a54097013d80a62b779ecbc78988cedabbf7e734ae01f2e2442c10ff274a6f7f
|
from __future__ import annotations
from scitbx.array_family import flex
from dials.algorithms.clustering import plots
from dials.util.observer import Observer, singleton
def uc_params_from_experiments(experiments):
uc_params = [flex.double() for i in range(6)]
for expt in experiments:
uc = expt.crystal.get_unit_cell()
for i in range(6):
uc_params[i].append(uc.parameters()[i])
return uc_params
@singleton
class UnitCellAnalysisObserver(Observer):
"""
Observer to record unit cell clustering data and make plots.
"""
def update(self, script):
"""Update the data in the observer."""
try:
self.data["dendrogram"] = script.unit_cell_dendrogram
except AttributeError:
pass
self.data["experiments"] = script._experiments
def make_plots(self):
"""Generate plots of the unit cell clustering."""
uc_params = uc_params_from_experiments(self.data["experiments"])
d = plots.plot_uc_histograms(uc_params)
if "dendrogram" in self.data:
d["uc_clustering"] = plots.scipy_dendrogram_to_plotly_json(
self.data["dendrogram"],
title="Unit cell clustering",
xtitle="Dataset",
ytitle=r"Distance (Å<sup>2</sup>)",
help="""\
The results of single-linkage hierarchical clustering on the unit cell parameters using
the Andrews-Bernstein NCDist distance metric (Andrews & Bernstein, 2014). The height at
which two clusters are merged in the dendrogram is a measure of the similarity between
the unit cells in each cluster. A larger separation between two clusters may be
indicative of a higher degree of non-isomorphism between the clusters. Conversely, a
small separation between two clusters suggests that their unit cell parameters are
relatively isomorphous.
""",
)
graphs = {"unit_cell_graphs": d}
return graphs
|
dials/dials
|
algorithms/clustering/observers.py
|
Python
|
bsd-3-clause
| 1,973
|
[
"CRYSTAL"
] |
1d63a50337fc4887d2c03669acc4630239fa58fe6eb513623598ce2329ae255c
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on)
import os, time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
# divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
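# For example, 300 = 0x012C, so the low byte is 44 and the high byte is 1:
#   intToBin(300) == chr(44) + chr(1)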
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise the global color table applies to all frames irrespective of
whether additional colors come into play that require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default: use full image and place at upper left
if xy is None:
xy = (0,0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin( xy[0] ) # Left position
bb += intToBin( xy[1] ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
# LZW minimum size code now comes later, beginning of [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
Application extension. This part specifies the number of loops.
If loops is 0 or inf, it goes on infinitely.
"""
if loops==0 or loops==float('inf'):
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(self, duration=0.1, dispose=2,transparent_flag=0,transparency_index=0):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 -To be defined.
"""
bb = '\x21\xF9\x04'
bb += chr(((dispose & 3) << 2)|(transparent_flag & 1)) # low bit 1 == transparency,
# 2nd bit 1 == user input , next 3 bits, the low two of which are used,
# are dispose.
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += chr(transparency_index) # transparency index
bb += '\x00' # end
return bb
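# For example, duration=0.5 and dispose=2 with no transparency builds
#   '\x21\xF9\x04' + chr(8) + intToBin(50) + chr(0) + '\x00'
# i.e. the extension header, packed flags, 50 hundredths of a second,
# a zero transparency index and the block terminator.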
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
image_info = [im.info for im in images ]
if isinstance(subRectangles, (tuple,list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0,0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0,0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy, image_info
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time, obviously. However,
if the image sizes were reduced, the actual writing of the GIF
goes faster. In some cases applying this method produces a GIF faster.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
diff = np.abs(im-prev)
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
def convertImagesToPIL(self, images, dither, nq=0,images_info=None):
""" convertImagesToPIL(images, nq=0)
Convert images to Paletted PIL images, which can then be
written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==3 and im.shape[2]==4:
# im = Image.fromarray(im[:,:,:3],'RGB')
self.transparency = True
im = Image.fromarray(im[:,:,:4],'RGBA')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage(),colors=255)
else:
im = nqInstance.quantize(im) # Use NeuQuant to quantize the image itself
self.transparency = True # since NQ assumes transparency
if self.transparency:
alpha = im.split()[3]
mask = Image.eval(alpha, lambda a: 255 if a <=128 else 0)
im.paste(255,mask=mask)
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
# for index,im in enumerate(images):
for i in range(len(images)):
im = images[i].convert('RGB').convert('P', palette=AD, dither=dither,colors=255)
if self.transparency:
alpha = images[i].split()[3]
mask = Image.eval(alpha, lambda a: 255 if a <=128 else 0)
im.paste(255,mask=mask)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append( getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
transparent_flag = 0
if self.transparency: transparent_flag = 1
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames],transparent_flag=transparent_flag,transparency_index=255)
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. One can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
gifWriter.transparency = False # init transparency flag used in GifWriter functions
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy, images_info = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0,0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for index,im in enumerate(images2):
tmp = PIL.Image.fromarray(im)
images.append(tmp)
# Done
return images
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer number of 1 or higher, 1
being the highest quality, but the slowest performance.
With a value of 10, one tenth of all pixels is used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the amount of colors to reduce the image to. This
should best be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0];
gg = self.colormap[i,1];
rr = self.colormap[i,2];
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(list(range(mid-1,-1,-1))+list(range(-1,mid)))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad) )
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0*alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w*h)) )
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a= np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
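# A minimal round-trip check (assumes the GIF written above and that PIL and
# numpy are installed):
frames = readGif('lala3.gif', asNumpy=True)
print('Read back %i frames, first frame shape: %s' % (len(frames), frames[0].shape))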
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/images2gif/images2gif.py
|
Python
|
gpl-3.0
| 38,106
|
[
"NEURON"
] |
cfb5b69881a5b65d7fa25bdc6a1a1f42fd6743dba03be2258253c5ab705b6782
|
# VAPiD is an extremely lightweight virus genome annotator that takes any number of viral genomes and annotates them
# producing files suitable for NCBI submission
# Vapid Version
VERSION = 'v1.6.6'
import subprocess
import re
import argparse
import timeit
import os
from Bio.Seq import Seq
from Bio.Blast import NCBIWWW
import platform
import sys
from Bio import Entrez
import time
import shutil
Entrez.email = 'uwvirongs@gmail.com'
# Reads in a fasta file that should have strain names for the names of the sequences - can handle any number of
# sequences. Also strips leading and trailing Ns or ?s from the provided sequence. Takes an optional boolean
# value to determine if names should be allowed to have system-protected characters like / in them. Returns three
# lists: the names of the strains in the first one, the genomes as strings in the second, and the full sequence
# names in the third - the third list is only populated when the slashes argument is true. Also changes U's to T's.
def read_fasta(fasta_file_loc, slashes=False):
strain_list = []
genome_list = []
full_name_list = []
dna_string = ''
for line in open(fasta_file_loc):
if line[0] == '>':
if slashes:
full_name_list.append(line[1:])
else:
full_name_list.append('')
strain_list.append(line[1:].split()[0])
if dna_string != '':
# strip leading and trailing Ns or ?s because there's no reason to submit them
xip = 0
while dna_string[xip] == 'N' or dna_string[xip] == '?':
xip += 1
y = len(dna_string)
while dna_string[y-1] == 'N' or dna_string[y-1] == '?':
y -= 1
dna_string = dna_string[xip:y]
genome_list.append(dna_string)
dna_string = ''
else:
dna_string += line.strip()
# Just to make sure all our sequences are on the same page
genome_list.append(dna_string)
return strain_list, genome_list, full_name_list
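# Hypothetical usage (the file name is illustrative): for a fasta containing
# records ">strainA ..." and ">strainB ...",
#   strains, genomes, full_names = read_fasta('example.fasta')
# gives strains == ['strainA', 'strainB'] and genomes holding the two sequences
# with leading/trailing Ns and ?s stripped; full_names is only filled in when slashes=True.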
# Spell checking functionality provided by Entrez
# takes an input string and returns the Entrez corrected string as long as it exists
def spell_check(query_string):
# new Entrez rules limit requests to no more than 3 a second; this sleep ensures we don't send more than two per second
time.sleep(0.5)
handle = Entrez.espell(term=query_string)
record = Entrez.read(handle)
corrected_query = record["CorrectedQuery"]
# Entrez returns blank strings for numerals or things that are spelled correctly
# Since this is based on NCBI's spell checking protein names are included and correct
# However this won't correct SUPER messed up words or made up words
if corrected_query != '':
print('Checking spelling on ' + query_string)
print(query_string + ' was corrected to: ' + corrected_query)
return corrected_query
else:
return query_string
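# Hypothetical usage: spell_check('hemaglutinin') may come back corrected by
# NCBI's spell checker, while correctly spelled terms and numerals are returned
# unchanged; every call needs network access and is rate-limited by the sleep above.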
# This function takes the strain name and a location of the individual fasta file we saved earlier and runs a blast
# Search saving the top 15 hits - then the top hit is found that is a complete genome and the fasta and .gbk of that
# are saved - then we run alignment on the two and return two strings: our sequence with '-' gaps inserted, and the
# aligned reference sequence. If a sequence is submitted that needs to be reverse complemented to align
# to a reference the sequence will stay this way
def blast_n_stuff(strain, our_fasta_loc):
# if user provided own reference use that one - also use our specifically chosen reference for some viruses
if args.r:
ding = args.r
ref_seq_gb = ding
# if the user provided a database to use print location and use database
elif args.db:
local_database_location = args.db
print('Searching local blast database at ' + local_database_location)
# blastn with word size of 28 because we're searching off a provided reference we're just going to pull the top
local_blast_cmd = 'blastn -db ' + local_database_location + ' -query ' + our_fasta_loc + \
' -num_alignments 1 -word_size 28 -outfmt 6 -out ' + strain + SLASH + strain \
+ '.blastresults'
subprocess.call(local_blast_cmd, shell=True)
# pull first accession number from our reference database
for line in open(strain + SLASH + strain + '.blastresults'):
ref_seq_gb = line.split('|')[3]
break
# online search
elif args.online:
print('Searching NCBI for the best reference sequence (may take longer for multiple requests due to NCBI '
'throttling)')
record = open(our_fasta_loc).read()
# used to have entrez_query = 'txid10239[ORGN]' as an argument to restrict searches to taxonomic group 'Viruses'
result_handle = NCBIWWW.qblast('blastn', 'nt', record, word_size=28, descriptions=0, alignments=15,
format_type='Text')
with open(strain + SLASH + strain + '.blastresults', 'w') as out_handle:
out_handle.write(result_handle.read())
result_handle.close()
# read through the top hits saved earlier and save the accession number of the best hit that's complete
read_next = False
found = False
for line in open(strain + SLASH + strain + '.blastresults'):
if line[0] == '>':
name_of_virus = ' '.join(line.split()[1:]).split('strain')[0].split('isolate')[0].split('complete')[0].split('partial')[0].split('genomic')[0].split('from')[0].strip()
name_of_virus = name_of_virus.split('/')[0]
ref_seq_gb = line.split()[0][1:]
# last part of these two logic checks is so we avoid the misassembled/mutated viruses
# This is going to get really out of hand if we have to keep blacklisting records
# TODO: pull these out of the database, regenerate and reupload
if 'complete' in line and ref_seq_gb.split('.')[0] not in 'KM551753 GQ153651 L08816 HIVANT70C L20587':
found = True
break
else:
read_next = True
elif read_next:
if 'complete genome' in line and ref_seq_gb.split('.')[0] not in 'KM551753 GQ153651 L08816 HIVANT70C L20587':
found = True
break
else:
read_next = False
# if we don't find any complete genomes just pull the top hit from blast and go from there
if not found:
for line in open(strain + SLASH + strain + '.blastresults'):
if line[0] == '>':
name_of_virus = ' '.join(line.split()[1:]).split('strain')[0].split('isolate')[0].split('complete')[0].split('partial')[0].split('genomic')[0].split('from')[0].strip()
ref_seq_gb = line.split()[0][1:]
break
# default case -- use either of the provided reference databases that we will include
else:
# all virus is the preferred database but we'll switch to compressed if that's what the user downloaded
# this list IS ordered by how much I recommend using these databases
if os.path.isfile('all_virus.fasta.nin'):
local_database_location = 'all_virus.fasta'
elif os.path.isfile('virus_compressed.fasta.nin'):
local_database_location = 'virus_compressed.fasta'
elif os.path.isfile('ref_seq_vir.nin'):
local_database_location = 'ref_seq_vir'
# print a helpful error message and exit
else:
print('No local blast database found in this folder! Please install from the github releases page! '
'(https://github.com/rcs333/VAPiD/releases) Or use vapid with --online (not recommended)')
print('Exiting...')
exit(0)
print('Searching local blast database at ' + local_database_location)
# we're only going to save one because these are pretty decent reference databases
local_blast_cmd = 'blastn -db ' + local_database_location + ' -query ' + our_fasta_loc + \
' -num_alignments 1 -word_size 28 -outfmt 6 -out ' + strain + SLASH + strain \
+ '.blastresults'
subprocess.call(local_blast_cmd, shell=True)
# pull first accession number
for line in open(strain + SLASH + strain + '.blastresults'):
ref_seq_gb = line.split('|')[3]
break
# Look up the chosen reference accession on Entrez
record = Entrez.read(Entrez.esearch(db='nucleotide', term=ref_seq_gb))
# Download .gbk from Entrez, we'll pull annotations from this file later
h2 = Entrez.efetch(db='nucleotide', id=record["IdList"][0], rettype='gb', retmode='text')
e = open(strain + SLASH + strain + '_ref.gbk', 'w')
e.write(h2.read())
e.close()
# if we've specified our own file remove the one we just found and delete it
if args.f:
os.remove(strain + SLASH + strain + '_ref.gbk')
shutil.copyfile(args.f, strain + SLASH + strain + '_ref.gbk')
# NCBI online tools don't want more than about 1 request every 0.2 seconds, so we just sleep for a second here
time.sleep(1)
# because the difference in how this stuff gets saved we have to pull online differently
if not args.online:
for line in open(strain + SLASH + strain + '_ref.gbk'):
if 'DEFINITION' in line:
# this forces removal of 'complete'/'partial' annotations because those get added by GenBank and there is no reason to include them
# we also want to strip off specific strain and isolate names in order to tend towards being more general
name_of_virus = ' '.join(line.split()[1:]).split('strain')[0].split('isolate')[0].split('complete')[0].split('partial')[0].split('genomic')[0].split('from')[0].strip()
# let the user know
print(ref_seq_gb + ' was the selected reference')
print(name_of_virus + ' was the parsed name of the virus')
h = Entrez.efetch(db='nucleotide', id=record["IdList"][0], rettype='fasta', retmode='text')
d = open(strain + SLASH + strain + '_ref.fasta', 'w')
d.write(h.read())
d.close()
# mafft rules and the developer of mafft is awesome
z = open(strain + SLASH + strain + '_aligner.fasta', 'w')
fe = open(our_fasta_loc)
for line in fe:
z.write(line)
fe.close()
ge = open(strain + SLASH + strain + '_ref.fasta')
z.write('\n')
for line in ge:
z.write(line)
ge.close()
z.close()
print('Aligning reference and query...')
# Windows
if SLASH == '\\':
# since we include the windows installation of mafft with vapid we can hard code the path
s = 'mafft-win\\mafft.bat --adjustdirection --quiet ' + strain + SLASH + strain + '_aligner.fasta > ' + strain + SLASH + strain + '.ali'
subprocess.call(s, shell=True)
else:
try:
subprocess.call('mafft --adjustdirection --quiet ' + strain + SLASH + strain + '_aligner.fasta > ' + strain + SLASH + strain + '.ali 2>/dev/null',
shell=True)
# print a helpful error message and exit
except:
print('Running on a non-Windows system, which means you need to install mafft and put it on your system PATH '
'yourself.\nI suggest using brew or apt')
exit(0)
ali_list, ali_genomes, dumy_var_never_used = read_fasta(strain + SLASH + strain + '.ali')
need_to_rc = False
# mafft prepends '_R_' to the name of any sequence it reverse complements with --adjustdirection
if '_R_' in ali_list[1]:
if ali_list[1][0:3] == '_R_':
# we need to RC input query and redo
need_to_rc = True
print('Done alignment')
# this is the reverse of what I expect but it works
ref_seq = ali_genomes[1]
our_seq = ali_genomes[0]
# now also returning the accession of the reference for use in the .cmt file as well as a bool for if we need to rerun this block
return name_of_virus, our_seq, ref_seq, ref_seq_gb, need_to_rc
# Takes in two sequences with gaps inserted inside of them and returns arrays that have a -1 in the gap locations and
# count up from 1 in the nucleotide areas - This data structure allows for extremely rapid conversion between relative
# locations in the two sequences although does assume that these genes are of uniform length
# NOTE: This means that when we have reads that don't include the start codon of the first gene (or similar)
# we'll get a -1 for the start location on our annotation
def build_num_arrays(our_seq, ref_seq):
ref_count = 0
our_count = 0
ref_num_array = []
our_num_array = []
for x in range(0, len(ref_seq)):
if ref_seq[x] != '-':
ref_count += 1
ref_num_array.append(ref_count)
else:
ref_num_array.append(-1)
if our_seq[x] != '-':
our_count += 1
our_num_array.append(our_count)
else:
our_num_array.append(-1)
return our_num_array, ref_num_array
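# a minimal sketch of what build_num_arrays produces, using made-up toy alignments:
#   our_seq = 'AT-GC' and ref_seq = 'ATCG-'
#   -> our_num_array = [1, 2, -1, 3, 4] and ref_num_array = [1, 2, 3, 4, -1]
# gaps become -1 and real bases count up, so a shared alignment index translates a coordinate
# from one unaligned sequence to the other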
# Takes a gene start index relative to the unaligned reference sequence and returns the location of the same
# position on the unaligned sequence that we're annotating, using the number arrays to do the conversion
def adjust(given_num, our_num_array, ref_num_array, genome):
found = False
# Handles gene lengths that go off the end of the genome
# 1.6.4 - this is obsolete and a bad implementation, the block at the end of this function takes care of this
# better, I'm leaving this in comments for a while just in case
#if given_num >= len(genome):
# return len(genome)
# Go through our number array and search for the number of interest
if our_num_array[given_num] == -1:
in_dex = given_num
# walk forward until we hit a real (non-gap) position
while our_num_array[in_dex] == -1:
in_dex += 1
return str(our_num_array[in_dex])
else:
found = False
for x in range(0, len(our_num_array)):
if ref_num_array[x] == given_num:
index = x
found = True
break
# now index is the absolute location of what we want
if found:
if our_num_array[index] >= len(genome):
# this is the new handling of when genes run off the end of the submitted genome
return len(genome)
return str(our_num_array[index])
else:
return str(len(genome))
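# a hedged worked example using the toy arrays above (hypothetical genome 'ATGC'):
#   adjust(4, [1, 2, -1, 3, 4], [1, 2, 3, 4, -1], 'ATGC') -> '3'  (reference position 4 sits at our position 3)
#   adjust(3, [1, 2, -1, 3, 4], [1, 2, 3, 4, -1], 'ATGC') -> '-1' (reference position 3 falls in a gap of our sequence,
#   the situation described in the NOTE above build_num_arrays)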
# this opens up the reference .gbk file and pulls all of the annotations, it then adjusts the annotations to the
# relative locations that they should appear on our sequence
def pull_correct_annotations(strain, our_seq, ref_seq, genome):
# Read the reference gbk file and extract lists of all of the protein locations and annotations!
# now we're doing this at the top because we're calling this earlier
our_seq_num_array, ref_seq_num_array = build_num_arrays(our_seq, ref_seq)
gene_loc_list = []
gene_product_list = []
allow_one = False
all_loc_list = []
all_product_list = []
name_of_the_feature_list = []
# Experimental code for transferring 'gene' annotations from NCBI reference sequence
if args.all:
for line in open(strain + SLASH + strain + '_ref.gbk'):
if ('..' in line) and ( ('gene' in line) or ('mat_peptide' in line) or ('UTR' in line) or ('repeat_region' in line) ):
name_of_the_feature_list.append(line.split()[0])
if 'complement' in line:
whack = re.findall(r'\d+', line.split()[1])
whack.reverse()
all_loc_list.append(whack)
else:
all_loc_list.append(re.findall(r'\d+', line.split()[1]))
allow_one = True
if 'UTR' in line:
allow_one = False
all_product_list.append('')
elif allow_one :
if '/product' in line:
allow_one = False
px_all = line.split('=')[1][1:-2]
all_product_list.append(px_all)
elif '/gene' in line:
allow_one = False
px_all = line.split('=')[1][1:-2]
all_product_list.append(px_all)
elif 'UTR' in name_of_the_feature_list[-1]:
px_all = line.split('=')[1][1:-2]
all_product_list.append(px_all)
allow_one = False
elif '/rpt_type' in line:
px_all = line.split('=')[1][0:-1]
all_product_list.append(px_all)
allow_one = False
elif '/db_xref' in line:
name_of_the_feature_list.pop()
all_loc_list.pop()
allow_one = False
# adjust gene list
#print(all_loc_list)
#print(all_product_list)
#print(name_of_the_feature_list)
for entry in range(0, len(all_loc_list)):
for y in range(0, len(all_loc_list[entry])):
all_loc_list[entry][y] = adjust(int(all_loc_list[entry][y]), our_seq_num_array, ref_seq_num_array, genome)
#print("DONE WITH THE ALL STUFF")
allow_one = False
for line in open(strain + SLASH + strain + '_ref.gbk'):
if ' CDS ' in line and '..' in line:
# this is now going to be a list of numbers, start-stop start-stop
# this line simply makes sure we read in reversed start-stops in the true reversed direction
if 'complement' in line:
whack = re.findall(r'\d+', line)
whack.reverse()
gene_loc_list.append(whack)
else:
gene_loc_list.append(re.findall(r'\d+', line))
allow_one = True
if '/product="' in line and allow_one:
allow_one = False
# Inconsistent naming of protein products
px = line.split('=')[1][1:-2]
# for some weird reason - this is the way we make sure we only go in when the flag is passed
# TODO: need to make sure that this is activating properly
if args.spell_check:
new_list = []
px_word_list = px.split()
for word in px_word_list:
# only spell check words that contain no digits; keep numbered words as-is
if not any(ch.isdigit() for ch in word):
new_list.append(spell_check(word))
else:
new_list.append(word)
px = ' '.join(new_list)
if px == 'phospho protein':
px = 'phoshoprotein'
gene_product_list.append(px)
# Adjust every locus so that we actually put in correct annotations
for entry in range(0, len(gene_loc_list)):
for y in range(0, len(gene_loc_list[entry])):
gene_loc_list[entry][y] = adjust(int(gene_loc_list[entry][y]), our_seq_num_array, ref_seq_num_array, genome)
return gene_loc_list, gene_product_list, all_loc_list, all_product_list, name_of_the_feature_list
# takes a strain name and a genome and writes and saves a fasta to the correct directory
def write_fasta(strain, genome):
w = open(strain + SLASH + strain + '.fasta', 'w')
w.write('>' + strain + '\n')
w.write(genome)
w.close()
# Take the name of a virus sample, and write the .cmt file for it using supplied coverage information
# NOTE: only writes coverage - so if we want to record our sequencing platform we have to edit this code
# Now also writes in the comment the reference that this submission was annotated from - this should provide some more
# accountability
def write_cmt(sample_name, coverage, ref_gb, did_we_rc):
cmt = open(sample_name + SLASH + 'assembly.cmt', 'w')
cmt.write('##Assembly-Data-START##\n')
if coverage != '':
cmt.write('Coverage\t' + coverage + '\n')
if did_we_rc:
cmt.write('Original input sequence was reverse complemented by MAFFT during the alignment phase\n')
cmt.write('Created with VAPiD' + VERSION + ' Reference annotations were pulled from ' + ref_gb + '\n')
cmt.write('##Assembly-Data-END##\n')
cmt.close()
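# a hedged sketch of the structured comment file this writes (coverage, version and accession are made up):
#   ##Assembly-Data-START##
#   Coverage	42.3
#   Created with VAPiD1.6.x Reference annotations were pulled from KX123456.1
#   ##Assembly-Data-END##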
# this takes in all of our information and makes a feature table that contains correct annotations for ribosomal slippage and RNA editing
# - as well as creating a .pep file for RNA editing -- Now we also pass two possibly empty lists to write_tbl so we can write gene annotations
def write_tbl(strain, gene_product_list, gene_locations, genome, gene_of_interest, note, name_o_vir, all_loc_list, all_product_list, full_name, name_of_the_feature_list):
# covers the nipah situation where there's RNA editing on more than 1 protein - if this happens for more viruses I'll need to code a more
# robust solution, but for now this works
if 'nipah' in name_o_vir.lower():
pep = open(strain + SLASH + strain + '.pep', 'w')
tbl = open(strain + SLASH + strain + '.tbl', 'w')
tbl.write('>Feature ' + full_name)
# This block writes all gene annotations to the tbl file as long as we got passed genes, and the only way that
# will ever happen is if the user passed the --all flag
if len(all_product_list) > 0:
for x in range(0, len(all_product_list)):
print(all_product_list[x] + str(all_loc_list[x]))
e_flag = ''
s_flag = ''
s_all = all_loc_list[x][0]
e_all = all_loc_list[x][1]
p_all = all_product_list[x]
if int(e_all) >= len(genome):
e_flag = ''
if int(s_all) < 1:
s_flag = '<'
s_all = '1'
if int(e_all) < 1:
e_all = len(genome)
e_flag = '>'
if p_all == 'inverted terminal repeat':
if int(s_all) < (len(genome) / 2):
s_all = 1
else:
e_all = len(genome)
tbl.write('\n' + s_flag + str(s_all) + '\t' + e_flag + str(e_all) + '\trepeat_region\n')
tbl.write('\t\t\tnote\t' + p_all + '\n')
tbl.write('\t\t\trpt_type\tinverted')
else:
tbl.write('\n' + s_flag + str(s_all) + '\t' + e_flag + str(e_all) + '\t' +name_of_the_feature_list[x] + '\n')
if 'UTR' in name_of_the_feature_list[x] or 'gene' in name_of_the_feature_list[x]:
feat_des = 'gene'
elif 'mat_peptide' in name_of_the_feature_list[x] or 'CDS' in name_of_the_feature_list[x]:
feat_des = 'product'
else:
feat_des = 'rpt_type'
tbl.write('\t\t\t' + feat_des + '\t' + p_all)
for x in range(0, len(gene_product_list)):
print(gene_product_list[x] + ' ' + str(gene_locations[x]))
flag = ''
xtra = ''
sflag = ''
product = gene_product_list[x]
if gene_of_interest in product:
xtra = note
if 'nipah' in name_o_vir.lower():
nts_of_gene = genome[int(gene_locations[x][0]) - 1:int(gene_locations[x][1]) - 1]
if product.lower() == 'v protein':
xtra = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds non templated ' \
'G\n\t\t\tprotein_id\tn_1' + strain
start_of_poly_g = nts_of_gene.find('AAAAAGG')
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
new_translation = str(Seq(nts_of_gene).translate())
pep.write('>n_1' + strain + '\n' + new_translation)
pep.write('\n')
elif product.lower() == 'w protein':
xtra = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds non templated ' \
'G\n\t\t\tprotein_id\tn_2' + strain
start_of_poly_g = nts_of_gene.find('AAAAAGG')
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
new_translation = str(Seq(nts_of_gene).translate())
pep.write('>n_2' + strain + '\n' + new_translation)
pep.write('\n')
if 'HIV' in name_o_vir and ('Pol polyprotein' == product or 'Pol' == product):
sflag = '<'
location_info = gene_locations[x]
if len(location_info) == 4:
start_1 = str(location_info[0])
end_1 = str(location_info[1])
start_2 = str(location_info[2])
end_2 = str(location_info[3])
tbl.write('\n' + start_1 + '\t' + end_1 + '\tCDS\n')
tbl.write(start_2 + '\t' + end_2 + '\n')
tbl.write('\t\t\tproduct\t' + product + '\n')
if 'HEPATITIS B' not in name_o_vir and 'BK polyomavirus' not in name_o_vir:
tbl.write('\t\t\texception\tRibosomal Slippage\n')
else:
start = int(location_info[0])
end = int(location_info[1])
it_count = 0
modifid_orf = False
# won't execute this block of code for complemented genes
#print(genome[end - 3:end].upper())
# this makes sure that our end is in frame, and will adjust it - hopefully this doesn't break everything
# added a check to only do this in the absence of RNA editing or ribosomal slippage
if xtra == '' and end != len(genome):
if ((end - start) + 1) % 3 != 0:
end += 1
if ((end - start) + 1) % 3 != 0:
end += 1
if end > start and 'IIIA' not in product.upper():
if re.search('[MRWSYKVHDBN]', genome[end - 3:end].upper()):
print('Ambiguous base detected in a putative stop codon, this can cause problems with VAPiD annotations')
elif (genome[end - 3:end].upper() not in 'TGA,TAA,TAG,UGA,UAA,UAG') and (end < len(genome) - 3):
print('Modifying ORF length for ' + str(product))
end = find_end_stop(genome, start, end)
# This should now correctly annotate assemblies that come in with the very edges chopped off
#print(genome[end - 3:end].upper())
pie = ''
die = ''
if int(start) < 1:
sflag = '<'
pie = str((int(end) % 3) + 1)
start = '1'
if int(end) < 1:
end = len(genome)
flag = '>'
if 'HPIV-1' in name_o_vir or 'human parainfluenza virus 1' in name_o_vir.lower():
if 'C\'' in product or 'Y2' in product:
die = '\n\t\t\ttransl_except\t(pos:' + str(start) + '..' + str(int(start) + 2) + ',aa:Met)'
tbl.write('\n' + sflag + str(start) + '\t' + flag + str(end) + '\tCDS\n')
tbl.write('\t\t\tproduct\t' + product + xtra)
if pie != '':
tbl.write('\n\t\t\tcodon_start\t' + pie)
if die != '':
tbl.write(die)
tbl.write('\n')
tbl.close()
if 'nipah' in name_o_vir.lower():
pep.close()
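# a hedged sketch of a simple CDS entry in the resulting .tbl feature table (coordinates and product are made up):
#   >Feature Sample1
#   1	1689	CDS
#   			product	nucleoprotein
# start, stop and feature type are tab separated and qualifier lines are indented with three tabs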
# Takes a nucleotide sequence and a start and end position [1 indexed] and searches for a stop codon from the start
# up to 60 nucleotides (20 codons) past the provided end. Returns the first stop codon found or, if no
# stop codon is found, returns the original end value and prints a warning
def find_end_stop(genome, start, end):
# save the provided end
start -= 1
old_end = end
end = start + 3
# Search for stop codons in DNA and RNA space until 3 codons after the provided end.
# Turns out 3 codons isn't enough
while genome[end -3:end].upper() not in 'TGA,TAA,TAG,UGA,UAA,UAG' and end <= (old_end + 60):
end += 3
if end == old_end + 60:
print('WARNING no stop codon found, examine reference and original sequence')
return old_end
else:
return end
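# a small made-up illustration: for genome = 'ATGAAATGACCC', start = 1, end = 6 the scan walks codon by
# codon from the start ('ATG', 'AAA', 'TGA', ...) and returns 9, the 1-indexed position of the last base of
# the first in-frame stop codon ('TGA')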
# takes a single strain name and a single genome and annotates and save the entire virus and annotations package
# returns the "species" of the virus for consolidated .sqn packaging
def annotate_a_virus(strain, genome, metadata, coverage, sbt_loc, full_name, nuc_a_type):
did_we_reverse_complement = False
if not os.path.exists(strain):
os.makedirs(strain)
if '_R_' in strain:
if strain[0:3] == '_R_':
print('WARNING: ' + strain + ' has _R_ as the first characters of the sequence name - YOU HAVE TO CHANGE THIS')
write_fasta(strain, genome)
name_of_virus, our_seq, ref_seq, ref_accession, need_to_rc = blast_n_stuff(strain, strain + SLASH + strain + '.fasta')
if need_to_rc:
print('Input sequence needed to be reverse complemented to align properly.')
new_seq = Seq(genome)
# reverse complement input sequence and overwrite variable
genome = str(new_seq.reverse_complement())
did_we_reverse_complement = True
# overwrite our fasta - this happens before we call write_fsa
write_fasta(strain, genome)
name_of_virus, our_seq, ref_seq, ref_accession, need_to_rc = blast_n_stuff(strain, strain + SLASH + strain + '.fasta')
gene_loc_list, gene_product_list, all_loc_list, all_product_list, name_of_the_feature_list = pull_correct_annotations(strain, our_seq, ref_seq, genome)
write_cmt(strain, coverage,ref_accession, did_we_reverse_complement)
write_fsa(strain, name_of_virus, genome, metadata, full_name, nuc_a_type)
extra_stuff = ''
# prime gene of interest so unless we're in one of the specific cases nothing will trigger
gene_of_interest = 'XFNDKLS:NLFKSD:FJNSDLKFJDSLKFJDLFUHE:OPUHFE:LUHILDLKFJNSDLFKJBNDLKFUHSLDUBFKNLKDFJBLSKDJFBLDKS'
if 'respirovirus' in name_of_virus.lower() or 'parainfluenza virus 3' in name_of_virus.lower():
if '3' in name_of_virus:
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds non templated ' \
'Gs\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'D protein'
process_para(strain, genome, gene_loc_list, gene_product_list, 'D protein', 'HP3')
elif '1' in name_of_virus:
extra_stuff = 'WEGOTAPARA1'
gene_of_interest ='C\' protein'
if 'parainfluenza virus 4' in name_of_virus.lower():
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds 2 non templated ' \
'G\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'phosphoprotein'
if 'P' in gene_product_list:
gene_of_interest = 'P'
elif 'P protein' in gene_product_list:
gene_of_interest = 'P protein'
process_para(strain, genome, gene_loc_list, gene_product_list, gene_of_interest, 'HPIV4a')
if 'measles' in name_of_virus.lower():
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds 1 non templated ' \
'G\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'V protein'
process_para(strain, genome, gene_loc_list, gene_product_list, 'V protein', 'MEAS')
if 'mumps' in name_of_virus.lower():
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds 2 non templated ' \
'G\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'phosphoprotein'
process_para(strain, genome, gene_loc_list, gene_product_list, gene_of_interest, 'MUMP')
if 'rubulavirus 4' in name_of_virus:
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds 2 non templated ' \
'Gs\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'phosphoprotein'
process_para(strain, genome, gene_loc_list, gene_product_list, 'phoshoprotein', 'HP4-1')
if 'metapneumovirus' in name_of_virus.lower():
put_start = int(gene_loc_list[7][0])
#print('start of gene is ' + str(put_start))
orf = genome[put_start - 1:put_start + 4000]
#print('orf length is ' + str(len(orf)))
#print('genome length is ' + str(len(genome)))
orf_trans = str(Seq(orf).translate())
#print('orf translation is ' + orf_trans)
if orf_trans.find('*') != -1:
put_end = (orf_trans.find('*') * 3)
print('putative end is ' + str(put_end))
gene_loc_list[7][1] = put_start + put_end
if 'parainfluenza virus 2' in name_of_virus.lower() or 'rubulavirus 2' in name_of_virus.lower():
#print('Custom code for HPIV2 running')
extra_stuff = '\n\t\t\texception\tRNA Editing\n\t\t\tnote\tRNA Polymerase adds 2 non templated ' \
'G\n\t\t\tprotein_id\tn_' + strain
gene_of_interest = 'P protein'
process_para(strain, genome, gene_loc_list, gene_product_list, gene_of_interest, 'HPIV2')
write_tbl(strain, gene_product_list, gene_loc_list, genome, gene_of_interest, extra_stuff, name_of_virus, all_loc_list, all_product_list, full_name, name_of_the_feature_list)
cmd = 'tbl2asn -p ' + strain + SLASH + ' -t ' + sbt_loc + ' -Y ' + strain + SLASH + 'assembly.cmt -V vb '
try:
subprocess.call(cmd, shell=True)
except:
print('tbl2asn not installed, go to https://www.ncbi.nlm.nih.gov/genbank/tbl2asn2/ and download the appropriate version')
print('Done with: ' + strain)
print('')
print('')
return name_of_virus
# This function takes two versions of a nucleotide sequence that had non-templated G's inserted (one G and two G's), trims
# each from the end so its length is divisible by three, translates both, and returns the raw sequence whose
# translation contains the fewest stop codons
def pick_correct_frame(one, two):
print('Picking correct reading frame for RNA editing:')
# we already added the G's to the start - and generally we hit the stop codon exactly where the annotation says we
# will so this just avoids some weirdness with passing sequences of length not divisible by three to seq.translate()
while len(one) % 3 != 0:
one = one[:-1]
while len(two) % 3 != 0:
two = two[:-1]
one_trans = str(Seq(one).translate())
two_trans = str(Seq(two).translate())
one_count = one_trans.count('*')
two_count = two_trans.count('*')
# Troubleshooting code that I'm keeping in for when we add more viruses that have non templated G's
#print('adding two Gs gives ' + str(two_count) + ' stop codon(s)')
#print(two_trans)
#print('adding one G gives ' + str(one_count) + ' stop codon(s)')
#print(one_trans)
if one_count < two_count:
#print('chose one')
return one
else:
#print('chose two')
return two
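# a quick hypothetical illustration: if adding one G gives a translation with 0 internal stops and adding
# two Gs gives 5, the one-G sequence is returned; a tie falls through to the two-G sequence because the
# comparison is a strict less-than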
# Takes a virus that has GGGGGG RNA editing and based on the gene that you send it and the name of the virus will
# find that gene in our annotations - add the correct number of G's and then translate the new 'mRNA' and write the
# translation to a .pep file where we can overwrite the sequin auto-translation
def process_para(strain, genome, gene_loc_list, gene_product_list, gene_of_interest, v):
# Extract the gene - protected because everything we throw in here is guaranteed to have the gene of interest
print('Looks like this virus has RNA editing, fixing it now')
#print(v)
found_ = False
#print('gene of interest = '+ gene_of_interest)
for g in range(0, len(gene_product_list)):
# flipping this covers whack spacing in protein products
#print('product = '+ gene_product_list[g])
if gene_of_interest in gene_product_list[g]:
nts_of_gene = genome[int(gene_loc_list[g][0]) - 1:int(gene_loc_list[g][1]) - 1]
found_ = True
break
if found_:
# add the correct number of Gs
if v == 'HP3':
start_of_poly_g = nts_of_gene.find('GGGGG', 700, 740)
nts_of_gene_1 = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene_2 = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene = pick_correct_frame(nts_of_gene_1, nts_of_gene_2)
# despite ViralZone saying SENDAI adds 1 G, adding two removes the stop codons - remains tbd if this is variable
elif v == 'SENDAI':
start_of_poly_g = nts_of_gene.find('AAAAGGG')
nts_of_gene_1 = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene_2 = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene = pick_correct_frame(nts_of_gene_1, nts_of_gene_2)
elif v == 'HP4-1':
start_of_poly_g = nts_of_gene.find('AAGAGG', 435, 460)
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
elif v == 'MUMP':
start_of_poly_g = nts_of_gene.find('AAGAGG', 445, 465)
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
elif v == 'MEAS':
start_of_poly_g = nts_of_gene.find('AAAAAGG', 674, 695)
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
elif v == 'NIPAH':
start_of_poly_g = nts_of_gene.find('AAAAAAGG', 705, 725)
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
elif v == 'HPIV4a':
start_of_poly_g = nts_of_gene.find('AAGAGG', 439, 460)
nts_of_gene = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
elif v == 'HPIV2':
#print('HPIV2 is getting here correctly')
start_of_poly_g = nts_of_gene.find('AAGAGG', 450, 490)
nts_of_gene_1 = nts_of_gene[0:start_of_poly_g + 1] + 'G' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene_2 = nts_of_gene[0:start_of_poly_g + 1] + 'GG' + nts_of_gene[start_of_poly_g + 1:]
nts_of_gene = pick_correct_frame(nts_of_gene_1, nts_of_gene_2)
new_translation = str(Seq(nts_of_gene).translate())
pep = open(strain + SLASH + strain + '.pep', 'w')
pep.write('>n_' + strain + '\n' + new_translation)
pep.write('\n')
pep.close()
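# a hedged sketch of the HP3 branch with a made-up gene fragment: if nts_of_gene contains '...AAGGGGG...'
# around position 700-740, find('GGGGG', 700, 740) locates the run, one or two extra Gs get spliced in right
# after the first G of the run, and pick_correct_frame keeps whichever insertion translates with fewer stops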
# Writes an fsa file based on the name, strain and genome, honestly we should allow for much more flexibility
# and automation here
def write_fsa(strain, name_of_virus, virus_genome, metadata, full_name, nucleic_acid_type):
fsa = open(strain + SLASH + strain + '.fsa', 'w')
fsa.write('>' + full_name.strip() + ' [organism=' + name_of_virus + ']' + '[moltype=genomic] [host=Human] [gcode=1] '
'[molecule=' + nucleic_acid_type + ']' + metadata + '\n')
fsa.write(virus_genome)
fsa.write('\n')
fsa.close()
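# a hedged example of the defline write_fsa produces (every value here is hypothetical):
#   >Sample1 (USA/2016) [organism=Measles morbillivirus][moltype=genomic] [host=Human] [gcode=1] [molecule=RNA] [collection-date=2016] [country=USA] [strain=Sample1]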
# Build the metadata for every virus that's been submitted
def do_meta_data(strain, sheet_exists, full_name):
first = True
s = ''
coverage = ''
if sheet_exists:
for line in open(metadata_sheet_location):
if first:
names = line.split(',')
first = False
elif line.split(',')[0] == strain:
for dex in range(0, len(names)):
if names[dex].strip() == 'coverage':
coverage = line.split(',')[dex].strip()
elif names[dex].strip() == 'full_name':
if line.split(',')[dex].strip() != '':
full_name = line.split(',')[dex].strip()
else:
s = s + ' [' + names[dex].strip() + '=' + line.split(',')[dex].strip() + ']'
break
if s == '':
print('metadata not found in provided .csv or .csv not created - time for minimal manual entry for sequence - ' + strain)
col = ' [collection-date=' + raw_input('Enter collection date in the format (23-Mar-2005, Mar-2005, or 2005): ').strip() + ']'
con = ' [country=' + raw_input('Enter country sample was collected in (example - USA): ').strip() + ']'
st = ' [strain=' + raw_input('Enter strain name - if unknown just put ' + strain + ': ').strip() + ']'
cov = raw_input('Enter coverage as a number (example 42.3), if unknown just leave this blank and hit enter: ')
meta_data = col + con + st
coverage = cov
# Here's one line of code to unilaterally standardize the default naming scheme
if full_name == '':
full_name = strain + ' (' + con.split('=')[1][:-1] + '/' + col.split('=')[1][:-1] + ')'
else:
meta_data = s
if full_name == '':
full_name = strain
print('Automatic strain naming failed but submission will proceed without metadata appended to the fasta header.')
return meta_data, coverage, full_name
# Takes the name of a recently created .gbf file and checks it for stop codons (which usually indicate something went
# wrong). NOTE: requires tbl2asn to have successfully created a .gbf file or this will fail catastrophically
# Now also returns if stop codons are in it or not so they'll be omitted during the packaging phase
def check_for_stops(sample_name):
stops = 0
for line in open(sample_name + SLASH + sample_name + '.gbf'):
if '*' in line:
stops += 1
if stops > 0:
print('WARNING: ' + sample_name + ' contains ' + str(stops) + ' stop codon(s)!')
return True
else:
return False
# quick check to make sure slashes go the right way on both Windows and Mac/Linux
def check_os():
if platform.system() == 'Linux' or platform.system() == 'Darwin':
return '/'
else:
return '\\'
if __name__ == '__main__':
start_time = timeit.default_timer()
SLASH = check_os()
parser = argparse.ArgumentParser(description='Version ' + VERSION + '\nPrepares FASTA file for NCBI Genbank submission '
'through local or online blastn-based annotation of viral sequences. '
'In default mode, VAPiD searches this folder for our viral databases.')
parser.add_argument('fasta_file', help='Input file in .fasta format containing complete or near complete '
'genomes for all the viruses that you want to have annotated')
parser.add_argument('author_template_file_loc', help='File path for the NCBI-provided sequence author template file'
' (should have a .sbt extension)\n https://submit.ncbi.nlm.nih.gov/genbank/template/submission/')
parser.add_argument('--metadata_loc', help='If you\'ve input the metadata in the provided csv, specify the location '
'with this optional argument. Otherwise all metadata will be manually prompted for.')
parser.add_argument('--r', help='If you want to specify a specific NCBI reference, put the accession number here '
'- must be the exact accession number - note: this forces all sequences in the FASTA to be treated as this viral species.')
parser.add_argument('--f', help='specify a custom GenBank (.gbk) reference file that you would like to annotate off of')
parser.add_argument('--db', help='specify the local blast database name. You MUST have blast+ with blastn '
'installed correctly on your system path for this to work.')
parser.add_argument('--online',action='store_true', help='Force VAPiD to blast against online database. This is good for machines that don\'t '
'have blast+ installed or if the virus is really strange. '
'Warning: this can be EXTREMELY slow, up to ~5-25 minutes a virus')
parser.add_argument('--spell_check', action='store_true', help='Turn on spellchecking for protein annotations')
parser.add_argument('--all', action='store_true', help='Use this flag to transfer ALL annotations from reference, this is largely untested')
parser.add_argument('--slashes', action='store_true', help='Use this flag to allow any characters in the name of your virus - This allows '
'you to submit with a fasta file formatted like >Sample1 (Human/USA/2016/A) Complete CDS'
' make sure that your metadata file only contains the first part of your name \'Sample1\' in the example above. '
'You can also submit names with slashes by specifying in the metadata sheet under the header full_name, if you do that '
'you do not need to use this flag')
parser.add_argument('--dna', action='store_true', help='Make all files annotated by this run be marked as DNA instead of the default (RNA)')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
fasta_loc = args.fasta_file
if args.dna:
nuc_acid_type = 'DNA'
else:
nuc_acid_type = 'RNA'
sbt_file_loc = args.author_template_file_loc
virus_strain_list, virus_genome_list, full_name_list = read_fasta(fasta_loc, args.slashes)
strain2species = {}
strain2stops = {}
meta_list = []
coverage_list = []
if args.metadata_loc:
metadata_sheet_location = args.metadata_loc
for x in range(0, len(virus_strain_list)):
metadata, coverage, full_name_list[x] = do_meta_data(virus_strain_list[x], True, full_name_list[x])
meta_list.append(metadata)
coverage_list.append(coverage)
else:
for x in range(0, len(virus_strain_list)):
metadata, coverage, full_name_list[x] = do_meta_data(virus_strain_list[x], False, full_name_list[x])
meta_list.append(metadata)
coverage_list.append(coverage)
for x in range(0, len(virus_strain_list)):
strain2species[virus_strain_list[x]] = annotate_a_virus(virus_strain_list[x], virus_genome_list[x],
meta_list[x], coverage_list[x], sbt_file_loc, full_name_list[x], nuc_acid_type)
# now we've got a map of [strain] -> name of virus (with whitespace)
for name in virus_strain_list:
# now we've got a map of [strain] -> boolean value if there are stops or not
strain2stops[name] = check_for_stops(name)
if len(name) > 23:
print('WARNING: ' + name + ' is over 23 characters, which means that your gbf file will be corrupted')
time = str(timeit.default_timer() - start_time)
newtime = time.split('.')[0] + '.' + time.split('.')[1][0:1]
print('Done, did ' + str(len(virus_strain_list)) + ' viruses in ' + newtime + ' seconds')
|
rcs333/ClinVirusSeq
|
vapid.py
|
Python
|
mit
| 47,219
|
[
"BLAST"
] |
4588fd00bc15a2ebeea104fa4b3038bff901ded17b4af39a47885f9c96ad422e
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.datalabeling_v1beta1.services.data_labeling_service import (
DataLabelingServiceAsyncClient,
)
from google.cloud.datalabeling_v1beta1.services.data_labeling_service import (
DataLabelingServiceClient,
)
from google.cloud.datalabeling_v1beta1.services.data_labeling_service import pagers
from google.cloud.datalabeling_v1beta1.services.data_labeling_service import transports
from google.cloud.datalabeling_v1beta1.types import annotation
from google.cloud.datalabeling_v1beta1.types import annotation_spec_set
from google.cloud.datalabeling_v1beta1.types import (
annotation_spec_set as gcd_annotation_spec_set,
)
from google.cloud.datalabeling_v1beta1.types import data_labeling_service
from google.cloud.datalabeling_v1beta1.types import data_payloads
from google.cloud.datalabeling_v1beta1.types import dataset
from google.cloud.datalabeling_v1beta1.types import dataset as gcd_dataset
from google.cloud.datalabeling_v1beta1.types import evaluation
from google.cloud.datalabeling_v1beta1.types import evaluation_job
from google.cloud.datalabeling_v1beta1.types import evaluation_job as gcd_evaluation_job
from google.cloud.datalabeling_v1beta1.types import human_annotation_config
from google.cloud.datalabeling_v1beta1.types import instruction
from google.cloud.datalabeling_v1beta1.types import instruction as gcd_instruction
from google.cloud.datalabeling_v1beta1.types import operations
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DataLabelingServiceClient._get_default_mtls_endpoint(None) is None
assert (
DataLabelingServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DataLabelingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DataLabelingServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DataLabelingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
DataLabelingServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [DataLabelingServiceClient, DataLabelingServiceAsyncClient,]
)
def test_data_labeling_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datalabeling.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DataLabelingServiceGrpcTransport, "grpc"),
(transports.DataLabelingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_data_labeling_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [DataLabelingServiceClient, DataLabelingServiceAsyncClient,]
)
def test_data_labeling_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datalabeling.googleapis.com:443"
def test_data_labeling_service_client_get_transport_class():
transport = DataLabelingServiceClient.get_transport_class()
available_transports = [
transports.DataLabelingServiceGrpcTransport,
]
assert transport in available_transports
transport = DataLabelingServiceClient.get_transport_class("grpc")
assert transport == transports.DataLabelingServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DataLabelingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceClient),
)
@mock.patch.object(
DataLabelingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceAsyncClient),
)
def test_data_labeling_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(DataLabelingServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(DataLabelingServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
"true",
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
"false",
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DataLabelingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceClient),
)
@mock.patch.object(
DataLabelingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_data_labeling_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [DataLabelingServiceClient, DataLabelingServiceAsyncClient]
)
@mock.patch.object(
DataLabelingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceClient),
)
@mock.patch.object(
DataLabelingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataLabelingServiceAsyncClient),
)
def test_data_labeling_service_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_data_labeling_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_data_labeling_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_data_labeling_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.datalabeling_v1beta1.services.data_labeling_service.transports.DataLabelingServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DataLabelingServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DataLabelingServiceClient,
transports.DataLabelingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_data_labeling_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"datalabeling.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="datalabeling.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.CreateDatasetRequest, dict,]
)
def test_create_dataset(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
data_item_count=1584,
)
response = client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
assert response.data_item_count == 1584
def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
client.create_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateDatasetRequest()
@pytest.mark.asyncio
async def test_create_dataset_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.CreateDatasetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
data_item_count=1584,
)
)
response = await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
assert response.data_item_count == 1584
@pytest.mark.asyncio
async def test_create_dataset_async_from_dict():
await test_create_dataset_async(request_type=dict)
def test_create_dataset_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = gcd_dataset.Dataset()
client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_dataset_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_dataset.Dataset())
await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_dataset_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_dataset(
parent="parent_value", dataset=gcd_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].dataset
mock_val = gcd_dataset.Dataset(name="name_value")
assert arg == mock_val
def test_create_dataset_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_dataset(
data_labeling_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gcd_dataset.Dataset(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_dataset_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_dataset(
parent="parent_value", dataset=gcd_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].dataset
mock_val = gcd_dataset.Dataset(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_dataset_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_dataset(
data_labeling_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gcd_dataset.Dataset(name="name_value"),
)
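
# Sketch of the guard that the *_flattened_error tests exercise. The generated
# clients implement an equivalent check internally; this stand-alone helper is
# an illustration only, not the actual client code.
def _reject_request_plus_flattened(request, **flattened_fields):
    has_flattened = any(value is not None for value in flattened_fields.values())
    if request is not None and has_flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
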
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetDatasetRequest, dict,]
)
def test_get_dataset(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
data_item_count=1584,
)
response = client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
assert response.data_item_count == 1584
def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
client.get_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDatasetRequest()
@pytest.mark.asyncio
async def test_get_dataset_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetDatasetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
data_item_count=1584,
)
)
response = await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
assert response.data_item_count == 1584
@pytest.mark.asyncio
async def test_get_dataset_async_from_dict():
await test_get_dataset_async(request_type=dict)
def test_get_dataset_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_dataset_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_dataset_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_dataset_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_dataset(
data_labeling_service.GetDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_dataset_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_dataset_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_dataset(
data_labeling_service.GetDatasetRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListDatasetsRequest, dict,]
)
def test_list_datasets(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
response = client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
client.list_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDatasetsRequest()
@pytest.mark.asyncio
async def test_list_datasets_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListDatasetsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_datasets_async_from_dict():
await test_list_datasets_async(request_type=dict)
def test_list_datasets_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = data_labeling_service.ListDatasetsResponse()
client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_datasets_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDatasetsResponse()
)
await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_datasets_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListDatasetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_datasets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_datasets_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_datasets(
data_labeling_service.ListDatasetsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_datasets_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDatasetsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_datasets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_datasets_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_datasets(
data_labeling_service.ListDatasetsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_datasets_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
data_labeling_service.ListDatasetsResponse(
datasets=[], next_page_token="def",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_datasets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, dataset.Dataset) for i in results)
def test_list_datasets_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
data_labeling_service.ListDatasetsResponse(
datasets=[], next_page_token="def",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
pages = list(client.list_datasets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
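
# Hypothetical helper, not used by the tests above: collects Dataset objects
# page by page through the standard pager interface, in which ``pages`` yields
# the raw ListDatasetsResponse for each page.
def _datasets_by_page(pager):
    return [list(page.datasets) for page in pager.pages]
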
@pytest.mark.asyncio
async def test_list_datasets_async_pager():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
data_labeling_service.ListDatasetsResponse(
datasets=[], next_page_token="def",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
async_pager = await client.list_datasets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.Dataset) for i in responses)
@pytest.mark.asyncio
async def test_list_datasets_async_pages():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
data_labeling_service.ListDatasetsResponse(
datasets=[], next_page_token="def",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
data_labeling_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_datasets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
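
# Async counterpart of the page-collection helper sketched above (illustrative
# only): the async pager exposes ``pages`` as an async iterator, so it has to
# be consumed with ``async for``.
async def _datasets_by_page_async(async_pager):
    return [list(page.datasets) async for page in async_pager.pages]
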
@pytest.mark.parametrize(
"request_type", [data_labeling_service.DeleteDatasetRequest, dict,]
)
def test_delete_dataset(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
client.delete_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteDatasetRequest()
@pytest.mark.asyncio
async def test_delete_dataset_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.DeleteDatasetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_dataset_async_from_dict():
await test_delete_dataset_async(request_type=dict)
def test_delete_dataset_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = None
client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_dataset_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_dataset_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_dataset_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_dataset(
data_labeling_service.DeleteDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_dataset(
data_labeling_service.DeleteDatasetRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ImportDataRequest, dict,]
)
def test_import_data(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
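
# Illustrative only: the tests above mock the gRPC transport, so the returned
# future is never resolved here. Against a real backend the long-running
# import would typically be awaited like this; the 300-second timeout is an
# arbitrary example value.
def _run_import_and_wait(client, request):
    operation = client.import_data(request=request)
    # Blocks until the operation completes (or the timeout expires) and
    # returns the unpacked operation result.
    return operation.result(timeout=300)
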
def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
client.import_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ImportDataRequest()
@pytest.mark.asyncio
async def test_import_data_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ImportDataRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_data_async_from_dict():
await test_import_data_async(request_type=dict)
def test_import_data_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_data_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_import_data_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.import_data(
name="name_value",
input_config=dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].input_config
mock_val = dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
)
assert arg == mock_val
def test_import_data_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.import_data(
data_labeling_service.ImportDataRequest(),
name="name_value",
input_config=dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
),
)
@pytest.mark.asyncio
async def test_import_data_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.import_data(
name="name_value",
input_config=dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].input_config
mock_val = dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_import_data_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.import_data(
data_labeling_service.ImportDataRequest(),
name="name_value",
input_config=dataset.InputConfig(
text_metadata=dataset.TextMetadata(language_code="language_code_value")
),
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ExportDataRequest, dict,]
)
def test_export_data(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
client.export_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ExportDataRequest()
@pytest.mark.asyncio
async def test_export_data_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ExportDataRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_data_async_from_dict():
await test_export_data_async(request_type=dict)
def test_export_data_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_data_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_export_data_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_data(
name="name_value",
annotated_dataset="annotated_dataset_value",
filter="filter_value",
output_config=dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].annotated_dataset
mock_val = "annotated_dataset_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
arg = args[0].output_config
mock_val = dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
)
assert arg == mock_val
def test_export_data_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.export_data(
data_labeling_service.ExportDataRequest(),
name="name_value",
annotated_dataset="annotated_dataset_value",
filter="filter_value",
output_config=dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
),
)
@pytest.mark.asyncio
async def test_export_data_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.export_data(
name="name_value",
annotated_dataset="annotated_dataset_value",
filter="filter_value",
output_config=dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].annotated_dataset
mock_val = "annotated_dataset_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
arg = args[0].output_config
mock_val = dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_export_data_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.export_data(
data_labeling_service.ExportDataRequest(),
name="name_value",
annotated_dataset="annotated_dataset_value",
filter="filter_value",
output_config=dataset.OutputConfig(
gcs_destination=dataset.GcsDestination(output_uri="output_uri_value")
),
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetDataItemRequest, dict,]
)
def test_get_data_item(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.DataItem(
name="name_value",
image_payload=data_payloads.ImagePayload(mime_type="mime_type_value"),
)
response = client.get_data_item(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDataItemRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.DataItem)
assert response.name == "name_value"
def test_get_data_item_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
client.get_data_item()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDataItemRequest()
@pytest.mark.asyncio
async def test_get_data_item_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetDataItemRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.DataItem(name="name_value",)
)
response = await client.get_data_item(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetDataItemRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.DataItem)
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_data_item_async_from_dict():
await test_get_data_item_async(request_type=dict)
def test_get_data_item_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetDataItemRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
call.return_value = dataset.DataItem()
client.get_data_item(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_data_item_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetDataItemRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.DataItem())
await client.get_data_item(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_data_item_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.DataItem()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_data_item(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_data_item_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_data_item(
data_labeling_service.GetDataItemRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_data_item_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_data_item), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.DataItem())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_data_item(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_data_item_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_data_item(
data_labeling_service.GetDataItemRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListDataItemsRequest, dict,]
)
def test_list_data_items(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
response = client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_data_items_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
client.list_data_items()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDataItemsRequest()
@pytest.mark.asyncio
async def test_list_data_items_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListDataItemsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_data_items_async_from_dict():
await test_list_data_items_async(request_type=dict)
def test_list_data_items_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = data_labeling_service.ListDataItemsResponse()
client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_data_items_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDataItemsResponse()
)
await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_data_items_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListDataItemsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_data_items(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_data_items_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_data_items(
data_labeling_service.ListDataItemsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_data_items_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListDataItemsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListDataItemsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_data_items(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_data_items_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_data_items(
data_labeling_service.ListDataItemsRequest(),
parent="parent_value",
filter="filter_value",
)
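# Editor's sketch (not part of the generated suite): the flattened tests above
# exercise the two mutually exclusive calling conventions. A method can be
# invoked either with an explicit request object or with flattened keyword
# arguments, but mixing the two raises ValueError. All values below are
# placeholders taken from the tests.
def _sketch_list_data_items_calling_conventions(client):
    # 1) Explicit request object.
    request = data_labeling_service.ListDataItemsRequest(parent="parent_value")
    client.list_data_items(request)
    # 2) Flattened keyword arguments; the client builds the request itself.
    client.list_data_items(parent="parent_value", filter="filter_value")
    # 3) Passing both at once raises ValueError (see *_flattened_error above).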
def test_list_data_items_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDataItemsResponse(
data_items=[
dataset.DataItem(),
dataset.DataItem(),
dataset.DataItem(),
],
next_page_token="abc",
),
data_labeling_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(),], next_page_token="ghi",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(), dataset.DataItem(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_data_items(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, dataset.DataItem) for i in results)
def test_list_data_items_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDataItemsResponse(
data_items=[
dataset.DataItem(),
dataset.DataItem(),
dataset.DataItem(),
],
next_page_token="abc",
),
data_labeling_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(),], next_page_token="ghi",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(), dataset.DataItem(),],
),
RuntimeError,
)
pages = list(client.list_data_items(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
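# Editor's sketch (not part of the generated suite): how the synchronous pager
# returned by ``list_data_items`` is typically consumed, matching the two pager
# tests above. Iterating the pager yields DataItem resources across pages,
# while ``.pages`` yields whole responses whose ``raw_page.next_page_token``
# drives pagination.
def _sketch_consume_list_data_items_pager(client):
    # Flat iteration over resources from every page.
    items = [item for item in client.list_data_items(request={})]
    # Page-wise iteration, e.g. to inspect each response's next_page_token.
    tokens = [
        page.raw_page.next_page_token
        for page in client.list_data_items(request={}).pages
    ]
    return items, tokens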
@pytest.mark.asyncio
async def test_list_data_items_async_pager():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDataItemsResponse(
data_items=[
dataset.DataItem(),
dataset.DataItem(),
dataset.DataItem(),
],
next_page_token="abc",
),
data_labeling_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(),], next_page_token="ghi",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(), dataset.DataItem(),],
),
RuntimeError,
)
async_pager = await client.list_data_items(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.DataItem) for i in responses)
@pytest.mark.asyncio
async def test_list_data_items_async_pages():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListDataItemsResponse(
data_items=[
dataset.DataItem(),
dataset.DataItem(),
dataset.DataItem(),
],
next_page_token="abc",
),
data_labeling_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(),], next_page_token="ghi",
),
data_labeling_service.ListDataItemsResponse(
data_items=[dataset.DataItem(), dataset.DataItem(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_data_items(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
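# Editor's sketch (not part of the generated suite): the asynchronous
# counterpart of the pager sketch above. The async pager must be awaited
# first and is then consumed with ``async for``; ``.pages`` behaves the same
# way on the awaited pager.
async def _sketch_consume_list_data_items_async_pager(client):
    async_pager = await client.list_data_items(request={})
    items = [item async for item in async_pager]
    pages = [page async for page in (await client.list_data_items(request={})).pages]
    return items, pages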
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetAnnotatedDatasetRequest, dict,]
)
def test_get_annotated_dataset(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.AnnotatedDataset(
name="name_value",
display_name="display_name_value",
description="description_value",
annotation_source=annotation.AnnotationSource.OPERATOR,
annotation_type=annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION,
example_count=1396,
completed_example_count=2448,
blocking_resources=["blocking_resources_value"],
)
response = client.get_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotatedDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.AnnotatedDataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.annotation_source == annotation.AnnotationSource.OPERATOR
assert (
response.annotation_type
== annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION
)
assert response.example_count == 1396
assert response.completed_example_count == 2448
assert response.blocking_resources == ["blocking_resources_value"]
def test_get_annotated_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
client.get_annotated_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotatedDatasetRequest()
@pytest.mark.asyncio
async def test_get_annotated_dataset_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetAnnotatedDatasetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.AnnotatedDataset(
name="name_value",
display_name="display_name_value",
description="description_value",
annotation_source=annotation.AnnotationSource.OPERATOR,
annotation_type=annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION,
example_count=1396,
completed_example_count=2448,
blocking_resources=["blocking_resources_value"],
)
)
response = await client.get_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotatedDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.AnnotatedDataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.annotation_source == annotation.AnnotationSource.OPERATOR
assert (
response.annotation_type
== annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION
)
assert response.example_count == 1396
assert response.completed_example_count == 2448
assert response.blocking_resources == ["blocking_resources_value"]
@pytest.mark.asyncio
async def test_get_annotated_dataset_async_from_dict():
await test_get_annotated_dataset_async(request_type=dict)
def test_get_annotated_dataset_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetAnnotatedDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
call.return_value = dataset.AnnotatedDataset()
client.get_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_annotated_dataset_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetAnnotatedDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.AnnotatedDataset()
)
await client.get_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_annotated_dataset_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.AnnotatedDataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotated_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_annotated_dataset_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_annotated_dataset(
data_labeling_service.GetAnnotatedDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_annotated_dataset_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.AnnotatedDataset()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.AnnotatedDataset()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_annotated_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_annotated_dataset_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_annotated_dataset(
data_labeling_service.GetAnnotatedDatasetRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListAnnotatedDatasetsRequest, dict,]
)
def test_list_annotated_datasets(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListAnnotatedDatasetsResponse(
next_page_token="next_page_token_value",
)
response = client.list_annotated_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotatedDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotatedDatasetsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_annotated_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
client.list_annotated_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotatedDatasetsRequest()
@pytest.mark.asyncio
async def test_list_annotated_datasets_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListAnnotatedDatasetsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotatedDatasetsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_annotated_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotatedDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotatedDatasetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_annotated_datasets_async_from_dict():
await test_list_annotated_datasets_async(request_type=dict)
def test_list_annotated_datasets_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListAnnotatedDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
call.return_value = data_labeling_service.ListAnnotatedDatasetsResponse()
client.list_annotated_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_annotated_datasets_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListAnnotatedDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotatedDatasetsResponse()
)
await client.list_annotated_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_annotated_datasets_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListAnnotatedDatasetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_annotated_datasets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_annotated_datasets_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_annotated_datasets(
data_labeling_service.ListAnnotatedDatasetsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_annotated_datasets_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListAnnotatedDatasetsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotatedDatasetsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_annotated_datasets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_annotated_datasets_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_annotated_datasets(
data_labeling_service.ListAnnotatedDatasetsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_annotated_datasets_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[], next_page_token="def",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[dataset.AnnotatedDataset(),], next_page_token="ghi",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_annotated_datasets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, dataset.AnnotatedDataset) for i in results)
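# Editor's note (not part of the generated suite): the ``pager._metadata``
# assertion above checks that the routing metadata built for the first call is
# retained on the pager; the assumption is that subsequent page fetches reuse
# the same x-goog-request-params header.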
def test_list_annotated_datasets_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[], next_page_token="def",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[dataset.AnnotatedDataset(),], next_page_token="ghi",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
),
RuntimeError,
)
pages = list(client.list_annotated_datasets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_annotated_datasets_async_pager():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[], next_page_token="def",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[dataset.AnnotatedDataset(),], next_page_token="ghi",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
),
RuntimeError,
)
async_pager = await client.list_annotated_datasets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.AnnotatedDataset) for i in responses)
@pytest.mark.asyncio
async def test_list_annotated_datasets_async_pages():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotated_datasets),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[], next_page_token="def",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[dataset.AnnotatedDataset(),], next_page_token="ghi",
),
data_labeling_service.ListAnnotatedDatasetsResponse(
annotated_datasets=[
dataset.AnnotatedDataset(),
dataset.AnnotatedDataset(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_annotated_datasets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [data_labeling_service.DeleteAnnotatedDatasetRequest, dict,]
)
def test_delete_annotated_dataset(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotatedDatasetRequest()
# Establish that the response is the type that we expect.
assert response is None
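# Editor's note (not part of the generated suite): delete RPCs in this service
# have no response payload, so the mocked stub returns None and the client is
# expected to surface None to the caller, exactly as asserted above.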
def test_delete_annotated_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotated_dataset), "__call__"
) as call:
client.delete_annotated_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotatedDatasetRequest()
@pytest.mark.asyncio
async def test_delete_annotated_dataset_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.DeleteAnnotatedDatasetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotated_dataset), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotatedDatasetRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_annotated_dataset_async_from_dict():
await test_delete_annotated_dataset_async(request_type=dict)
def test_delete_annotated_dataset_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteAnnotatedDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotated_dataset), "__call__"
) as call:
call.return_value = None
client.delete_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_annotated_dataset_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteAnnotatedDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotated_dataset), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_annotated_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [data_labeling_service.LabelImageRequest, dict,]
)
def test_label_image(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.label_image(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelImageRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
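# Editor's sketch (not part of the generated suite): ``label_image`` is a
# long-running operation, so the client wraps the returned
# ``operations_pb2.Operation`` in an api_core future (asserted above). A
# typical caller blocks on that future; the timeout below is a placeholder.
def _sketch_wait_for_label_image(client, request):
    operation = client.label_image(request)
    # result() polls the operation until it completes or the timeout elapses.
    return operation.result(timeout=300)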
def test_label_image_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
client.label_image()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelImageRequest()
@pytest.mark.asyncio
async def test_label_image_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.LabelImageRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.label_image(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelImageRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_label_image_async_from_dict():
await test_label_image_async(request_type=dict)
def test_label_image_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelImageRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.label_image(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_label_image_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelImageRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.label_image(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_label_image_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.label_image(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION
assert arg == mock_val
def test_label_image_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.label_image(
data_labeling_service.LabelImageRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION,
)
@pytest.mark.asyncio
async def test_label_image_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_image), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.label_image(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION
assert arg == mock_val
@pytest.mark.asyncio
async def test_label_image_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.label_image(
data_labeling_service.LabelImageRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION,
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.LabelVideoRequest, dict,]
)
def test_label_video(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.label_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_label_video_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
client.label_video()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelVideoRequest()
@pytest.mark.asyncio
async def test_label_video_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.LabelVideoRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.label_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_label_video_async_from_dict():
await test_label_video_async(request_type=dict)
def test_label_video_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelVideoRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.label_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_label_video_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelVideoRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.label_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_label_video_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.label_video(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION
assert arg == mock_val
def test_label_video_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.label_video(
data_labeling_service.LabelVideoRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION,
)
@pytest.mark.asyncio
async def test_label_video_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.label_video(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION
assert arg == mock_val
@pytest.mark.asyncio
async def test_label_video_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.label_video(
data_labeling_service.LabelVideoRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelVideoRequest.Feature.CLASSIFICATION,
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.LabelTextRequest, dict,]
)
def test_label_text(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.label_text(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelTextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_label_text_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
client.label_text()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelTextRequest()
@pytest.mark.asyncio
async def test_label_text_async(
transport: str = "grpc_asyncio", request_type=data_labeling_service.LabelTextRequest
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.label_text(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.LabelTextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_label_text_async_from_dict():
await test_label_text_async(request_type=dict)
def test_label_text_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelTextRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.label_text(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_label_text_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.LabelTextRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.label_text(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_label_text_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.label_text(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION
assert arg == mock_val
def test_label_text_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.label_text(
data_labeling_service.LabelTextRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION,
)
@pytest.mark.asyncio
async def test_label_text_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.label_text), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.label_text(
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].basic_config
mock_val = human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
)
assert arg == mock_val
arg = args[0].feature
mock_val = data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION
assert arg == mock_val
@pytest.mark.asyncio
async def test_label_text_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.label_text(
data_labeling_service.LabelTextRequest(),
parent="parent_value",
basic_config=human_annotation_config.HumanAnnotationConfig(
instruction="instruction_value"
),
feature=data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION,
)
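# Hypothetical usage sketch (not a test and not collected by pytest): the two
# calling conventions for label_text that the tests above exercise.  A caller
# passes EITHER a fully populated request object OR the flattened keyword
# arguments; mixing both raises ValueError, as the *_flattened_error tests
# verify.  The concrete result type of the long-running operation is not
# asserted in those tests, so it is deliberately left unnamed here.
def _sketch_label_text_call_styles(client):
    # Style 1: explicit request object.
    request = data_labeling_service.LabelTextRequest(parent="parent_value")
    operation = client.label_text(request=request)
    # Style 2: flattened keyword arguments (same fields as the flattened tests).
    operation = client.label_text(
        parent="parent_value",
        basic_config=human_annotation_config.HumanAnnotationConfig(
            instruction="instruction_value"
        ),
        feature=data_labeling_service.LabelTextRequest.Feature.TEXT_CLASSIFICATION,
    )
    # label_text is a long-running operation; result() blocks until it completes.
    return operation.result()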
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetExampleRequest, dict,]
)
def test_get_example(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Example(
name="name_value",
image_payload=data_payloads.ImagePayload(mime_type="mime_type_value"),
)
response = client.get_example(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetExampleRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Example)
assert response.name == "name_value"
def test_get_example_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
client.get_example()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetExampleRequest()
@pytest.mark.asyncio
async def test_get_example_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetExampleRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Example(name="name_value",)
)
response = await client.get_example(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetExampleRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Example)
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_example_async_from_dict():
await test_get_example_async(request_type=dict)
def test_get_example_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetExampleRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
call.return_value = dataset.Example()
client.get_example(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_example_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetExampleRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Example())
await client.get_example(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_example_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Example()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_example(
name="name_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_get_example_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_example(
data_labeling_service.GetExampleRequest(),
name="name_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_get_example_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_example), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Example())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_example(name="name_value", filter="filter_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_example_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_example(
data_labeling_service.GetExampleRequest(),
name="name_value",
filter="filter_value",
)
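# Hypothetical sketch (not a test): the two calling conventions for
# get_example covered above, using only fields those tests already construct.
def _sketch_get_example_call_styles(client):
    # Style 1: explicit request object.
    example = client.get_example(
        request=data_labeling_service.GetExampleRequest(name="name_value")
    )
    # Style 2: flattened keyword arguments (mixing the two styles raises
    # ValueError, as the *_flattened_error tests check).
    example = client.get_example(name="name_value", filter="filter_value")
    return example.name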
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListExamplesRequest, dict,]
)
def test_list_examples(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListExamplesResponse(
next_page_token="next_page_token_value",
)
response = client.list_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListExamplesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListExamplesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_examples_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
client.list_examples()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListExamplesRequest()
@pytest.mark.asyncio
async def test_list_examples_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListExamplesRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListExamplesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListExamplesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListExamplesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_examples_async_from_dict():
await test_list_examples_async(request_type=dict)
def test_list_examples_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListExamplesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
call.return_value = data_labeling_service.ListExamplesResponse()
client.list_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_examples_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListExamplesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListExamplesResponse()
)
await client.list_examples(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_examples_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListExamplesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_examples(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_examples_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_examples(
data_labeling_service.ListExamplesRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_examples_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListExamplesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_examples(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_examples_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_examples(
data_labeling_service.ListExamplesRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_examples_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(), dataset.Example(),],
next_page_token="abc",
),
data_labeling_service.ListExamplesResponse(
examples=[], next_page_token="def",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(),], next_page_token="ghi",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_examples(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, dataset.Example) for i in results)
def test_list_examples_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_examples), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(), dataset.Example(),],
next_page_token="abc",
),
data_labeling_service.ListExamplesResponse(
examples=[], next_page_token="def",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(),], next_page_token="ghi",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(),],
),
RuntimeError,
)
pages = list(client.list_examples(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_examples_async_pager():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_examples), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(), dataset.Example(),],
next_page_token="abc",
),
data_labeling_service.ListExamplesResponse(
examples=[], next_page_token="def",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(),], next_page_token="ghi",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(),],
),
RuntimeError,
)
async_pager = await client.list_examples(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.Example) for i in responses)
@pytest.mark.asyncio
async def test_list_examples_async_pages():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_examples), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(), dataset.Example(),],
next_page_token="abc",
),
data_labeling_service.ListExamplesResponse(
examples=[], next_page_token="def",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(),], next_page_token="ghi",
),
data_labeling_service.ListExamplesResponse(
examples=[dataset.Example(), dataset.Example(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_examples(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
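# Hypothetical sketch (not a test): how a caller might consume the
# ListExamples pager exercised above.  "parent_value" is a placeholder, not a
# real resource name.
def _sketch_consume_list_examples(client):
    # Item-by-item iteration: the pager transparently requests the next page
    # whenever the current response carries a next_page_token.
    names = [example.name for example in client.list_examples(parent="parent_value")]
    # Page-by-page iteration via the pages property, mirroring
    # test_list_examples_pages above.
    for page in client.list_examples(parent="parent_value").pages:
        _ = page.raw_page.next_page_token
    return names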
@pytest.mark.parametrize(
"request_type", [data_labeling_service.CreateAnnotationSpecSetRequest, dict,]
)
def test_create_annotation_spec_set(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
)
response = client.create_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_annotation_spec_set.AnnotationSpecSet)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
def test_create_annotation_spec_set_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
client.create_annotation_spec_set()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateAnnotationSpecSetRequest()
@pytest.mark.asyncio
async def test_create_annotation_spec_set_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.CreateAnnotationSpecSetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
)
)
response = await client.create_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_annotation_spec_set.AnnotationSpecSet)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
@pytest.mark.asyncio
async def test_create_annotation_spec_set_async_from_dict():
await test_create_annotation_spec_set_async(request_type=dict)
def test_create_annotation_spec_set_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateAnnotationSpecSetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
call.return_value = gcd_annotation_spec_set.AnnotationSpecSet()
client.create_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_annotation_spec_set_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateAnnotationSpecSetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_annotation_spec_set.AnnotationSpecSet()
)
await client.create_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_annotation_spec_set_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_annotation_spec_set.AnnotationSpecSet()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_annotation_spec_set(
parent="parent_value",
annotation_spec_set=gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].annotation_spec_set
mock_val = gcd_annotation_spec_set.AnnotationSpecSet(name="name_value")
assert arg == mock_val
def test_create_annotation_spec_set_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_annotation_spec_set(
data_labeling_service.CreateAnnotationSpecSetRequest(),
parent="parent_value",
annotation_spec_set=gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_annotation_spec_set_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_annotation_spec_set.AnnotationSpecSet()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_annotation_spec_set(
parent="parent_value",
annotation_spec_set=gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].annotation_spec_set
mock_val = gcd_annotation_spec_set.AnnotationSpecSet(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_annotation_spec_set_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_annotation_spec_set(
data_labeling_service.CreateAnnotationSpecSetRequest(),
parent="parent_value",
annotation_spec_set=gcd_annotation_spec_set.AnnotationSpecSet(
name="name_value"
),
)
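# Hypothetical sketch (not a test): building the spec set passed to
# create_annotation_spec_set, using only fields the tests above already set.
def _sketch_create_annotation_spec_set(client):
    spec_set = gcd_annotation_spec_set.AnnotationSpecSet(
        name="name_value",
        display_name="display_name_value",
        description="description_value",
        blocking_resources=["blocking_resources_value"],
    )
    return client.create_annotation_spec_set(
        parent="parent_value", annotation_spec_set=spec_set,
    )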
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetAnnotationSpecSetRequest, dict,]
)
def test_get_annotation_spec_set(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec_set.AnnotationSpecSet(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
)
response = client.get_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec_set.AnnotationSpecSet)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
def test_get_annotation_spec_set_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
client.get_annotation_spec_set()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotationSpecSetRequest()
@pytest.mark.asyncio
async def test_get_annotation_spec_set_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetAnnotationSpecSetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec_set.AnnotationSpecSet(
name="name_value",
display_name="display_name_value",
description="description_value",
blocking_resources=["blocking_resources_value"],
)
)
response = await client.get_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec_set.AnnotationSpecSet)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blocking_resources == ["blocking_resources_value"]
@pytest.mark.asyncio
async def test_get_annotation_spec_set_async_from_dict():
await test_get_annotation_spec_set_async(request_type=dict)
def test_get_annotation_spec_set_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetAnnotationSpecSetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
call.return_value = annotation_spec_set.AnnotationSpecSet()
client.get_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_annotation_spec_set_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetAnnotationSpecSetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec_set.AnnotationSpecSet()
)
await client.get_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_annotation_spec_set_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec_set.AnnotationSpecSet()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotation_spec_set(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_annotation_spec_set_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_annotation_spec_set(
data_labeling_service.GetAnnotationSpecSetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_annotation_spec_set_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec_set.AnnotationSpecSet()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_annotation_spec_set(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_annotation_spec_set_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_annotation_spec_set(
data_labeling_service.GetAnnotationSpecSetRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListAnnotationSpecSetsRequest, dict,]
)
def test_list_annotation_spec_sets(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListAnnotationSpecSetsResponse(
next_page_token="next_page_token_value",
)
response = client.list_annotation_spec_sets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotationSpecSetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationSpecSetsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_annotation_spec_sets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
client.list_annotation_spec_sets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotationSpecSetsRequest()
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListAnnotationSpecSetsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotationSpecSetsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_annotation_spec_sets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListAnnotationSpecSetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationSpecSetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_async_from_dict():
await test_list_annotation_spec_sets_async(request_type=dict)
def test_list_annotation_spec_sets_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListAnnotationSpecSetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
call.return_value = data_labeling_service.ListAnnotationSpecSetsResponse()
client.list_annotation_spec_sets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListAnnotationSpecSetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotationSpecSetsResponse()
)
await client.list_annotation_spec_sets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_annotation_spec_sets_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListAnnotationSpecSetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_annotation_spec_sets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_annotation_spec_sets_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_annotation_spec_sets(
data_labeling_service.ListAnnotationSpecSetsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListAnnotationSpecSetsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_annotation_spec_sets(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_annotation_spec_sets(
data_labeling_service.ListAnnotationSpecSetsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_annotation_spec_sets_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[], next_page_token="def",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[annotation_spec_set.AnnotationSpecSet(),],
next_page_token="ghi",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_annotation_spec_sets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, annotation_spec_set.AnnotationSpecSet) for i in results
)
def test_list_annotation_spec_sets_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[], next_page_token="def",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[annotation_spec_set.AnnotationSpecSet(),],
next_page_token="ghi",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
),
RuntimeError,
)
pages = list(client.list_annotation_spec_sets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_async_pager():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[], next_page_token="def",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[annotation_spec_set.AnnotationSpecSet(),],
next_page_token="ghi",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
),
RuntimeError,
)
async_pager = await client.list_annotation_spec_sets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, annotation_spec_set.AnnotationSpecSet) for i in responses
)
@pytest.mark.asyncio
async def test_list_annotation_spec_sets_async_pages():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotation_spec_sets),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
next_page_token="abc",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[], next_page_token="def",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[annotation_spec_set.AnnotationSpecSet(),],
next_page_token="ghi",
),
data_labeling_service.ListAnnotationSpecSetsResponse(
annotation_spec_sets=[
annotation_spec_set.AnnotationSpecSet(),
annotation_spec_set.AnnotationSpecSet(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_annotation_spec_sets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
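# Hypothetical sketch (not a test): consuming the async pager exercised above.
# With the async client the pager itself is awaited first, then iterated with
# "async for", exactly as test_list_annotation_spec_sets_async_pager does.
async def _sketch_consume_spec_sets_async(client):
    async_pager = await client.list_annotation_spec_sets(parent="parent_value")
    names = []
    async for spec_set in async_pager:
        names.append(spec_set.name)
    return names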
@pytest.mark.parametrize(
"request_type", [data_labeling_service.DeleteAnnotationSpecSetRequest, dict,]
)
def test_delete_annotation_spec_set(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_annotation_spec_set_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
client.delete_annotation_spec_set()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotationSpecSetRequest()
@pytest.mark.asyncio
async def test_delete_annotation_spec_set_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.DeleteAnnotationSpecSetRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteAnnotationSpecSetRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_annotation_spec_set_async_from_dict():
await test_delete_annotation_spec_set_async(request_type=dict)
def test_delete_annotation_spec_set_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteAnnotationSpecSetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
call.return_value = None
client.delete_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_annotation_spec_set_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteAnnotationSpecSetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_annotation_spec_set(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_annotation_spec_set_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_annotation_spec_set(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_annotation_spec_set_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_annotation_spec_set(
data_labeling_service.DeleteAnnotationSpecSetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_annotation_spec_set_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_annotation_spec_set), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_annotation_spec_set(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_annotation_spec_set_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_annotation_spec_set(
data_labeling_service.DeleteAnnotationSpecSetRequest(), name="name_value",
)
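# Illustrative sketch (assumption, for exposition only): the *_flattened_error
# tests encode the rule that a method accepts either a fully formed request
# object or individual keyword fields, never both. Conceptually the guard is:
def _example_reject_mixed_arguments(request, **flattened):
    # Mirror of the ValueError the tests above expect the client to raise.
    if request is not None and any(v is not None for v in flattened.values()):
        raise ValueError(
            "Cannot pass both a request object and flattened field values."
        )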
@pytest.mark.parametrize(
"request_type", [data_labeling_service.CreateInstructionRequest, dict,]
)
def test_create_instruction(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateInstructionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
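# Illustrative usage note (assumption, outside these mocked tests): because
# create_instruction is a long-running operation, the client hands back a
# future (asserted above via ``isinstance(response, future.Future)``). Real
# callers would typically block on it to obtain the finished resource:
def _example_wait_for_operation(operation_future, timeout=300):
    # Hypothetical helper: wait for the backend to finish and return the
    # operation's result (or raise if the operation failed).
    return operation_future.result(timeout=timeout)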
def test_create_instruction_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
client.create_instruction()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateInstructionRequest()
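# Illustrative sketch (assumption): the *_empty_call tests exercise the path
# where neither a request object nor flattened fields are supplied, in which
# case the client falls back to a default-constructed request, roughly:
def _example_default_request(request_type, request=None):
    # Hypothetical simplification of the coverage-failsafe path above.
    return request if request is not None else request_type()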
@pytest.mark.asyncio
async def test_create_instruction_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.CreateInstructionRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateInstructionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
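# Illustrative sketch (an assumption about intent, not the real
# grpc_helpers_async implementation): the async tests wrap return values in
# grpc_helpers_async.FakeUnaryUnaryCall so that ``await client.method(...)``
# can resolve to a canned response. Conceptually that wrapper is just an
# awaitable yielding a fixed value:
class _ExampleFakeUnaryUnaryCall:
    def __init__(self, response):
        self._response = response

    def __await__(self):
        # Making this method a generator turns instances into awaitables;
        # awaiting one simply produces the stored response.
        if False:
            yield  # pragma: no cover
        return self._response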
@pytest.mark.asyncio
async def test_create_instruction_async_from_dict():
await test_create_instruction_async(request_type=dict)
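# Illustrative sketch (assumption): the *_async_from_dict variants rerun the
# async test with ``request_type=dict`` because the client also accepts plain
# dicts and coerces them into the request message. A rough simplification:
def _example_coerce_request(request_type, request):
    # Hypothetical helper: build the proto message from a dict of fields,
    # otherwise pass an already-constructed message through unchanged.
    if isinstance(request, dict):
        request = request_type(**request)
    return request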
def test_create_instruction_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateInstructionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_instruction_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateInstructionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_instruction_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_instruction(
parent="parent_value",
instruction=gcd_instruction.Instruction(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].instruction
mock_val = gcd_instruction.Instruction(name="name_value")
assert arg == mock_val
def test_create_instruction_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_instruction(
data_labeling_service.CreateInstructionRequest(),
parent="parent_value",
instruction=gcd_instruction.Instruction(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_instruction_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_instruction(
parent="parent_value",
instruction=gcd_instruction.Instruction(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].instruction
mock_val = gcd_instruction.Instruction(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_instruction_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_instruction(
data_labeling_service.CreateInstructionRequest(),
parent="parent_value",
instruction=gcd_instruction.Instruction(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetInstructionRequest, dict,]
)
def test_get_instruction(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = instruction.Instruction(
name="name_value",
display_name="display_name_value",
description="description_value",
data_type=dataset.DataType.IMAGE,
blocking_resources=["blocking_resources_value"],
)
response = client.get_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetInstructionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, instruction.Instruction)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.data_type == dataset.DataType.IMAGE
assert response.blocking_resources == ["blocking_resources_value"]
def test_get_instruction_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
client.get_instruction()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetInstructionRequest()
@pytest.mark.asyncio
async def test_get_instruction_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetInstructionRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
instruction.Instruction(
name="name_value",
display_name="display_name_value",
description="description_value",
data_type=dataset.DataType.IMAGE,
blocking_resources=["blocking_resources_value"],
)
)
response = await client.get_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetInstructionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, instruction.Instruction)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.data_type == dataset.DataType.IMAGE
assert response.blocking_resources == ["blocking_resources_value"]
@pytest.mark.asyncio
async def test_get_instruction_async_from_dict():
await test_get_instruction_async(request_type=dict)
def test_get_instruction_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetInstructionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
call.return_value = instruction.Instruction()
client.get_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_instruction_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetInstructionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
instruction.Instruction()
)
await client.get_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_instruction_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = instruction.Instruction()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_instruction(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_instruction_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_instruction(
data_labeling_service.GetInstructionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_instruction_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instruction), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instruction.Instruction()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_instruction(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_instruction_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_instruction(
data_labeling_service.GetInstructionRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListInstructionsRequest, dict,]
)
def test_list_instructions(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListInstructionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_instructions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListInstructionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInstructionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_instructions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
client.list_instructions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListInstructionsRequest()
@pytest.mark.asyncio
async def test_list_instructions_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListInstructionsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListInstructionsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_instructions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListInstructionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInstructionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_instructions_async_from_dict():
await test_list_instructions_async(request_type=dict)
def test_list_instructions_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListInstructionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
call.return_value = data_labeling_service.ListInstructionsResponse()
client.list_instructions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_instructions_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListInstructionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListInstructionsResponse()
)
await client.list_instructions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_instructions_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListInstructionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_instructions(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_instructions_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_instructions(
data_labeling_service.ListInstructionsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_instructions_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            data_labeling_service.ListInstructionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_instructions(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_instructions_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_instructions(
data_labeling_service.ListInstructionsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_instructions_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListInstructionsResponse(
instructions=[
instruction.Instruction(),
instruction.Instruction(),
instruction.Instruction(),
],
next_page_token="abc",
),
data_labeling_service.ListInstructionsResponse(
instructions=[], next_page_token="def",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(),], next_page_token="ghi",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(), instruction.Instruction(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_instructions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, instruction.Instruction) for i in results)
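# Illustrative sketch (assumption, for exposition): the pager test above feeds
# the mocked RPC a sequence of ListInstructionsResponse pages and checks that
# iterating the pager yields every Instruction across all pages. Conceptually
# the pager flattens pages like this:
def _example_flatten_instruction_pages(pages):
    # ``pages`` is any iterable of responses carrying an ``instructions`` list;
    # in the real pager each ``next_page_token`` drives the follow-up RPC.
    for page in pages:
        for item in page.instructions:
            yield item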
def test_list_instructions_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListInstructionsResponse(
instructions=[
instruction.Instruction(),
instruction.Instruction(),
instruction.Instruction(),
],
next_page_token="abc",
),
data_labeling_service.ListInstructionsResponse(
instructions=[], next_page_token="def",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(),], next_page_token="ghi",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(), instruction.Instruction(),],
),
RuntimeError,
)
pages = list(client.list_instructions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_instructions_async_pager():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListInstructionsResponse(
instructions=[
instruction.Instruction(),
instruction.Instruction(),
instruction.Instruction(),
],
next_page_token="abc",
),
data_labeling_service.ListInstructionsResponse(
instructions=[], next_page_token="def",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(),], next_page_token="ghi",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(), instruction.Instruction(),],
),
RuntimeError,
)
async_pager = await client.list_instructions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, instruction.Instruction) for i in responses)
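# Illustrative sketch (assumption): the async pager is the coroutine-friendly
# counterpart of the sync pager and is consumed with ``async for``, as the
# test above does. A minimal async equivalent of the flattening logic:
async def _example_aflatten_instruction_pages(pages):
    # Hypothetical async generator; a real async pager awaits each page fetch.
    for page in pages:
        for item in page.instructions:
            yield item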
@pytest.mark.asyncio
async def test_list_instructions_async_pages():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instructions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListInstructionsResponse(
instructions=[
instruction.Instruction(),
instruction.Instruction(),
instruction.Instruction(),
],
next_page_token="abc",
),
data_labeling_service.ListInstructionsResponse(
instructions=[], next_page_token="def",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(),], next_page_token="ghi",
),
data_labeling_service.ListInstructionsResponse(
instructions=[instruction.Instruction(), instruction.Instruction(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_instructions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [data_labeling_service.DeleteInstructionRequest, dict,]
)
def test_delete_instruction(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteInstructionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_instruction_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
client.delete_instruction()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteInstructionRequest()
@pytest.mark.asyncio
async def test_delete_instruction_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.DeleteInstructionRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteInstructionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_instruction_async_from_dict():
await test_delete_instruction_async(request_type=dict)
def test_delete_instruction_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteInstructionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
call.return_value = None
client.delete_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_instruction_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteInstructionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_instruction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_instruction_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_instruction(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_instruction_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_instruction(
data_labeling_service.DeleteInstructionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_instruction_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_instruction), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_instruction(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_instruction_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_instruction(
data_labeling_service.DeleteInstructionRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetEvaluationRequest, dict,]
)
def test_get_evaluation(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation.Evaluation(
name="name_value",
annotation_type=annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION,
evaluated_item_count=2129,
)
response = client.get_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation.Evaluation)
assert response.name == "name_value"
assert (
response.annotation_type
== annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION
)
assert response.evaluated_item_count == 2129
def test_get_evaluation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
client.get_evaluation()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationRequest()
@pytest.mark.asyncio
async def test_get_evaluation_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetEvaluationRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation.Evaluation(
name="name_value",
annotation_type=annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION,
evaluated_item_count=2129,
)
)
response = await client.get_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation.Evaluation)
assert response.name == "name_value"
assert (
response.annotation_type
== annotation.AnnotationType.IMAGE_CLASSIFICATION_ANNOTATION
)
assert response.evaluated_item_count == 2129
@pytest.mark.asyncio
async def test_get_evaluation_async_from_dict():
await test_get_evaluation_async(request_type=dict)
def test_get_evaluation_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetEvaluationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
call.return_value = evaluation.Evaluation()
client.get_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_evaluation_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetEvaluationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation.Evaluation()
)
await client.get_evaluation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_evaluation_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation.Evaluation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_evaluation(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_evaluation_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_evaluation(
data_labeling_service.GetEvaluationRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_evaluation_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_evaluation), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            evaluation.Evaluation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_evaluation(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_evaluation_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_evaluation(
data_labeling_service.GetEvaluationRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.SearchEvaluationsRequest, dict,]
)
def test_search_evaluations(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.SearchEvaluationsResponse(
next_page_token="next_page_token_value",
)
response = client.search_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchEvaluationsPager)
assert response.next_page_token == "next_page_token_value"
def test_search_evaluations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
client.search_evaluations()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchEvaluationsRequest()
@pytest.mark.asyncio
async def test_search_evaluations_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.SearchEvaluationsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.SearchEvaluationsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchEvaluationsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_evaluations_async_from_dict():
await test_search_evaluations_async(request_type=dict)
def test_search_evaluations_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.SearchEvaluationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
call.return_value = data_labeling_service.SearchEvaluationsResponse()
client.search_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_evaluations_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.SearchEvaluationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.SearchEvaluationsResponse()
)
await client.search_evaluations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_search_evaluations_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.SearchEvaluationsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.search_evaluations(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_search_evaluations_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.search_evaluations(
data_labeling_service.SearchEvaluationsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_search_evaluations_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            data_labeling_service.SearchEvaluationsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.search_evaluations(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_search_evaluations_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.search_evaluations(
data_labeling_service.SearchEvaluationsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_search_evaluations_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchEvaluationsResponse(
evaluations=[
evaluation.Evaluation(),
evaluation.Evaluation(),
evaluation.Evaluation(),
],
next_page_token="abc",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[], next_page_token="def",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(),], next_page_token="ghi",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(), evaluation.Evaluation(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.search_evaluations(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, evaluation.Evaluation) for i in results)
def test_search_evaluations_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchEvaluationsResponse(
evaluations=[
evaluation.Evaluation(),
evaluation.Evaluation(),
evaluation.Evaluation(),
],
next_page_token="abc",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[], next_page_token="def",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(),], next_page_token="ghi",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(), evaluation.Evaluation(),],
),
RuntimeError,
)
pages = list(client.search_evaluations(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
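# Illustrative sketch (assumption): the *_pages tests iterate raw pages rather
# than individual results and rely on the final page carrying an empty
# ``next_page_token`` (compare the ["abc", "def", "ghi", ""] expectations
# above). A minimal driver that stops on the empty token:
def _example_iterate_raw_pages(responses):
    # Yield canned SearchEvaluationsResponse pages until the token runs out.
    for response in responses:
        yield response
        if not response.next_page_token:
            break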
@pytest.mark.asyncio
async def test_search_evaluations_async_pager():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchEvaluationsResponse(
evaluations=[
evaluation.Evaluation(),
evaluation.Evaluation(),
evaluation.Evaluation(),
],
next_page_token="abc",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[], next_page_token="def",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(),], next_page_token="ghi",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(), evaluation.Evaluation(),],
),
RuntimeError,
)
async_pager = await client.search_evaluations(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, evaluation.Evaluation) for i in responses)
@pytest.mark.asyncio
async def test_search_evaluations_async_pages():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchEvaluationsResponse(
evaluations=[
evaluation.Evaluation(),
evaluation.Evaluation(),
evaluation.Evaluation(),
],
next_page_token="abc",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[], next_page_token="def",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(),], next_page_token="ghi",
),
data_labeling_service.SearchEvaluationsResponse(
evaluations=[evaluation.Evaluation(), evaluation.Evaluation(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.search_evaluations(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [data_labeling_service.SearchExampleComparisonsRequest, dict,]
)
def test_search_example_comparisons(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.SearchExampleComparisonsResponse(
next_page_token="next_page_token_value",
)
response = client.search_example_comparisons(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchExampleComparisonsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchExampleComparisonsPager)
assert response.next_page_token == "next_page_token_value"
def test_search_example_comparisons_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
client.search_example_comparisons()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchExampleComparisonsRequest()
@pytest.mark.asyncio
async def test_search_example_comparisons_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.SearchExampleComparisonsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.SearchExampleComparisonsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_example_comparisons(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.SearchExampleComparisonsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchExampleComparisonsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_example_comparisons_async_from_dict():
await test_search_example_comparisons_async(request_type=dict)
def test_search_example_comparisons_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.SearchExampleComparisonsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
call.return_value = data_labeling_service.SearchExampleComparisonsResponse()
client.search_example_comparisons(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_example_comparisons_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.SearchExampleComparisonsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.SearchExampleComparisonsResponse()
)
await client.search_example_comparisons(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_search_example_comparisons_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.SearchExampleComparisonsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.search_example_comparisons(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_search_example_comparisons_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.search_example_comparisons(
data_labeling_service.SearchExampleComparisonsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_search_example_comparisons_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.SearchExampleComparisonsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.search_example_comparisons(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_search_example_comparisons_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.search_example_comparisons(
data_labeling_service.SearchExampleComparisonsRequest(),
parent="parent_value",
)
def test_search_example_comparisons_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="abc",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[], next_page_token="def",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="ghi",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
),
RuntimeError,
)
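# The pager is expected to carry routing-header metadata built from the request's (empty) `parent` field.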
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.search_example_comparisons(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(
i,
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison,
)
for i in results
)
def test_search_example_comparisons_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="abc",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[], next_page_token="def",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="ghi",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
),
RuntimeError,
)
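# Walk the raw pages and confirm that each one reports the expected next_page_token.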
pages = list(client.search_example_comparisons(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_example_comparisons_async_pager():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="abc",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[], next_page_token="def",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="ghi",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
),
RuntimeError,
)
async_pager = await client.search_example_comparisons(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(
i,
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison,
)
for i in responses
)
@pytest.mark.asyncio
async def test_search_example_comparisons_async_pages():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_example_comparisons),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="abc",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[], next_page_token="def",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
next_page_token="ghi",
),
data_labeling_service.SearchExampleComparisonsResponse(
example_comparisons=[
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
data_labeling_service.SearchExampleComparisonsResponse.ExampleComparison(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.search_example_comparisons(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [data_labeling_service.CreateEvaluationJobRequest, dict,]
)
def test_create_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
response = client.create_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
def test_create_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
client.create_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateEvaluationJobRequest()
@pytest.mark.asyncio
async def test_create_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.CreateEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
)
response = await client.create_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.CreateEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
@pytest.mark.asyncio
async def test_create_evaluation_job_async_from_dict():
await test_create_evaluation_job_async(request_type=dict)
def test_create_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateEvaluationJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
call.return_value = evaluation_job.EvaluationJob()
client.create_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.CreateEvaluationJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob()
)
await client.create_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation_job.EvaluationJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_evaluation_job(
parent="parent_value", job=evaluation_job.EvaluationJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].job
mock_val = evaluation_job.EvaluationJob(name="name_value")
assert arg == mock_val
def test_create_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_evaluation_job(
data_labeling_service.CreateEvaluationJobRequest(),
parent="parent_value",
job=evaluation_job.EvaluationJob(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_evaluation_job(
parent="parent_value", job=evaluation_job.EvaluationJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].job
mock_val = evaluation_job.EvaluationJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_evaluation_job(
data_labeling_service.CreateEvaluationJobRequest(),
parent="parent_value",
job=evaluation_job.EvaluationJob(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.UpdateEvaluationJobRequest, dict,]
)
def test_update_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=gcd_evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
response = client.update_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.UpdateEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == gcd_evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
def test_update_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
client.update_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.UpdateEvaluationJobRequest()
@pytest.mark.asyncio
async def test_update_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.UpdateEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=gcd_evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
)
response = await client.update_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.UpdateEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == gcd_evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
@pytest.mark.asyncio
async def test_update_evaluation_job_async_from_dict():
await test_update_evaluation_job_async(request_type=dict)
def test_update_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.UpdateEvaluationJobRequest()
request.evaluation_job.name = "evaluation_job.name/value"
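# The routing header asserted below is derived from this nested `evaluation_job.name` field.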
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
call.return_value = gcd_evaluation_job.EvaluationJob()
client.update_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"evaluation_job.name=evaluation_job.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.UpdateEvaluationJobRequest()
request.evaluation_job.name = "evaluation_job.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_evaluation_job.EvaluationJob()
)
await client.update_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"evaluation_job.name=evaluation_job.name/value",
) in kw["metadata"]
def test_update_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_evaluation_job.EvaluationJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
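# The update_mask indicates which EvaluationJob fields the server should modify.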
client.update_evaluation_job(
evaluation_job=gcd_evaluation_job.EvaluationJob(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].evaluation_job
mock_val = gcd_evaluation_job.EvaluationJob(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_evaluation_job(
data_labeling_service.UpdateEvaluationJobRequest(),
evaluation_job=gcd_evaluation_job.EvaluationJob(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_evaluation_job.EvaluationJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_evaluation_job(
evaluation_job=gcd_evaluation_job.EvaluationJob(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].evaluation_job
mock_val = gcd_evaluation_job.EvaluationJob(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_evaluation_job(
data_labeling_service.UpdateEvaluationJobRequest(),
evaluation_job=gcd_evaluation_job.EvaluationJob(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.GetEvaluationJobRequest, dict,]
)
def test_get_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
response = client.get_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
def test_get_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
client.get_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationJobRequest()
@pytest.mark.asyncio
async def test_get_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.GetEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob(
name="name_value",
description="description_value",
state=evaluation_job.EvaluationJob.State.SCHEDULED,
schedule="schedule_value",
model_version="model_version_value",
annotation_spec_set="annotation_spec_set_value",
label_missing_ground_truth=True,
)
)
response = await client.get_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.GetEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, evaluation_job.EvaluationJob)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == evaluation_job.EvaluationJob.State.SCHEDULED
assert response.schedule == "schedule_value"
assert response.model_version == "model_version_value"
assert response.annotation_spec_set == "annotation_spec_set_value"
assert response.label_missing_ground_truth is True
@pytest.mark.asyncio
async def test_get_evaluation_job_async_from_dict():
await test_get_evaluation_job_async(request_type=dict)
def test_get_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
call.return_value = evaluation_job.EvaluationJob()
client.get_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.GetEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob()
)
await client.get_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = evaluation_job.EvaluationJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_evaluation_job(
data_labeling_service.GetEvaluationJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
evaluation_job.EvaluationJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_evaluation_job(
data_labeling_service.GetEvaluationJobRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.PauseEvaluationJobRequest, dict,]
)
def test_pause_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
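# PauseEvaluationJob has no meaningful response payload, so the client should return None.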
response = client.pause_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.PauseEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_pause_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
client.pause_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.PauseEvaluationJobRequest()
@pytest.mark.asyncio
async def test_pause_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.PauseEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.pause_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.PauseEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_pause_evaluation_job_async_from_dict():
await test_pause_evaluation_job_async(request_type=dict)
def test_pause_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.PauseEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
call.return_value = None
client.pause_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_pause_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.PauseEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.pause_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_pause_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.pause_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_pause_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pause_evaluation_job(
data_labeling_service.PauseEvaluationJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_pause_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.pause_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_pause_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pause_evaluation_job(
data_labeling_service.PauseEvaluationJobRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ResumeEvaluationJobRequest, dict,]
)
def test_resume_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.resume_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ResumeEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_resume_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
client.resume_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ResumeEvaluationJobRequest()
@pytest.mark.asyncio
async def test_resume_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ResumeEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.resume_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ResumeEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_resume_evaluation_job_async_from_dict():
await test_resume_evaluation_job_async(request_type=dict)
def test_resume_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ResumeEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
call.return_value = None
client.resume_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_resume_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ResumeEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.resume_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_resume_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.resume_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_resume_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resume_evaluation_job(
data_labeling_service.ResumeEvaluationJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_resume_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.resume_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_resume_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.resume_evaluation_job(
data_labeling_service.ResumeEvaluationJobRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.DeleteEvaluationJobRequest, dict,]
)
def test_delete_evaluation_job(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_evaluation_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
client.delete_evaluation_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteEvaluationJobRequest()
@pytest.mark.asyncio
async def test_delete_evaluation_job_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.DeleteEvaluationJobRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.DeleteEvaluationJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_evaluation_job_async_from_dict():
await test_delete_evaluation_job_async(request_type=dict)
def test_delete_evaluation_job_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
call.return_value = None
client.delete_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_evaluation_job_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.DeleteEvaluationJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_evaluation_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_evaluation_job_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_evaluation_job_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_evaluation_job(
data_labeling_service.DeleteEvaluationJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_evaluation_job_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_evaluation_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_evaluation_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_evaluation_job_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_evaluation_job(
data_labeling_service.DeleteEvaluationJobRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [data_labeling_service.ListEvaluationJobsRequest, dict,]
)
def test_list_evaluation_jobs(request_type, transport: str = "grpc"):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListEvaluationJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_evaluation_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListEvaluationJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEvaluationJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_evaluation_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
client.list_evaluation_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListEvaluationJobsRequest()
@pytest.mark.asyncio
async def test_list_evaluation_jobs_async(
transport: str = "grpc_asyncio",
request_type=data_labeling_service.ListEvaluationJobsRequest,
):
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListEvaluationJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_evaluation_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == data_labeling_service.ListEvaluationJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEvaluationJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_evaluation_jobs_async_from_dict():
await test_list_evaluation_jobs_async(request_type=dict)
def test_list_evaluation_jobs_field_headers():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListEvaluationJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
call.return_value = data_labeling_service.ListEvaluationJobsResponse()
client.list_evaluation_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_evaluation_jobs_field_headers_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = data_labeling_service.ListEvaluationJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_service.ListEvaluationJobsResponse()
)
await client.list_evaluation_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_evaluation_jobs_flattened():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_service.ListEvaluationJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_evaluation_jobs(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_evaluation_jobs_flattened_error():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_evaluation_jobs(
data_labeling_service.ListEvaluationJobsRequest(),
parent="parent_value",
filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_evaluation_jobs_flattened_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            data_labeling_service.ListEvaluationJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_evaluation_jobs(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_evaluation_jobs_flattened_error_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_evaluation_jobs(
data_labeling_service.ListEvaluationJobsRequest(),
parent="parent_value",
filter="filter_value",
)
def test_list_evaluation_jobs_pager(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
next_page_token="abc",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[], next_page_token="def",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[evaluation_job.EvaluationJob(),],
next_page_token="ghi",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_evaluation_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, evaluation_job.EvaluationJob) for i in results)
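# The four mocked pages above contain 3 + 0 + 1 + 2 = 6 EvaluationJob items, so
# iterating the pager transparently fetches every page and flattens them into 6
# results; the trailing RuntimeError is never reached because the final page has
# no next_page_token.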
def test_list_evaluation_jobs_pages(transport_name: str = "grpc"):
client = DataLabelingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
next_page_token="abc",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[], next_page_token="def",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[evaluation_job.EvaluationJob(),],
next_page_token="ghi",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
),
RuntimeError,
)
pages = list(client.list_evaluation_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_evaluation_jobs_async_pager():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
next_page_token="abc",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[], next_page_token="def",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[evaluation_job.EvaluationJob(),],
next_page_token="ghi",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
),
RuntimeError,
)
async_pager = await client.list_evaluation_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, evaluation_job.EvaluationJob) for i in responses)
@pytest.mark.asyncio
async def test_list_evaluation_jobs_async_pages():
client = DataLabelingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_evaluation_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
next_page_token="abc",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[], next_page_token="def",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[evaluation_job.EvaluationJob(),],
next_page_token="ghi",
),
data_labeling_service.ListEvaluationJobsResponse(
evaluation_jobs=[
evaluation_job.EvaluationJob(),
evaluation_job.EvaluationJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_evaluation_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataLabelingServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DataLabelingServiceClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DataLabelingServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DataLabelingServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DataLabelingServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DataLabelingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DataLabelingServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.DataLabelingServiceGrpcTransport,
transports.DataLabelingServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(client.transport, transports.DataLabelingServiceGrpcTransport,)
def test_data_labeling_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DataLabelingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_data_labeling_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.datalabeling_v1beta1.services.data_labeling_service.transports.DataLabelingServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DataLabelingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_dataset",
"get_dataset",
"list_datasets",
"delete_dataset",
"import_data",
"export_data",
"get_data_item",
"list_data_items",
"get_annotated_dataset",
"list_annotated_datasets",
"delete_annotated_dataset",
"label_image",
"label_video",
"label_text",
"get_example",
"list_examples",
"create_annotation_spec_set",
"get_annotation_spec_set",
"list_annotation_spec_sets",
"delete_annotation_spec_set",
"create_instruction",
"get_instruction",
"list_instructions",
"delete_instruction",
"get_evaluation",
"search_evaluations",
"search_example_comparisons",
"create_evaluation_job",
"update_evaluation_job",
"get_evaluation_job",
"pause_evaluation_job",
"resume_evaluation_job",
"delete_evaluation_job",
"list_evaluation_jobs",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_data_labeling_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.datalabeling_v1beta1.services.data_labeling_service.transports.DataLabelingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DataLabelingServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_data_labeling_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.datalabeling_v1beta1.services.data_labeling_service.transports.DataLabelingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DataLabelingServiceTransport()
adc.assert_called_once()
def test_data_labeling_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DataLabelingServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DataLabelingServiceGrpcTransport,
transports.DataLabelingServiceGrpcAsyncIOTransport,
],
)
def test_data_labeling_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DataLabelingServiceGrpcTransport, grpc_helpers),
(transports.DataLabelingServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_data_labeling_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"datalabeling.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="datalabeling.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
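# Note: the -1 values for grpc.max_send_message_length and
# grpc.max_receive_message_length disable gRPC's default message-size limits,
# i.e. they allow messages of unlimited size on the created channel.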
@pytest.mark.parametrize(
"transport_class",
[
transports.DataLabelingServiceGrpcTransport,
transports.DataLabelingServiceGrpcAsyncIOTransport,
],
)
def test_data_labeling_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_data_labeling_service_host_no_port():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datalabeling.googleapis.com"
),
)
assert client.transport._host == "datalabeling.googleapis.com:443"
def test_data_labeling_service_host_with_port():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datalabeling.googleapis.com:8000"
),
)
assert client.transport._host == "datalabeling.googleapis.com:8000"
def test_data_labeling_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DataLabelingServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_data_labeling_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DataLabelingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DataLabelingServiceGrpcTransport,
transports.DataLabelingServiceGrpcAsyncIOTransport,
],
)
def test_data_labeling_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DataLabelingServiceGrpcTransport,
transports.DataLabelingServiceGrpcAsyncIOTransport,
],
)
def test_data_labeling_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_data_labeling_service_grpc_lro_client():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_data_labeling_service_grpc_lro_async_client():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_annotated_dataset_path():
project = "squid"
dataset = "clam"
annotated_dataset = "whelk"
expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
project=project, dataset=dataset, annotated_dataset=annotated_dataset,
)
actual = DataLabelingServiceClient.annotated_dataset_path(
project, dataset, annotated_dataset
)
assert expected == actual
def test_parse_annotated_dataset_path():
expected = {
"project": "octopus",
"dataset": "oyster",
"annotated_dataset": "nudibranch",
}
path = DataLabelingServiceClient.annotated_dataset_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_annotated_dataset_path(path)
assert expected == actual
def test_annotation_spec_set_path():
project = "cuttlefish"
annotation_spec_set = "mussel"
expected = "projects/{project}/annotationSpecSets/{annotation_spec_set}".format(
project=project, annotation_spec_set=annotation_spec_set,
)
actual = DataLabelingServiceClient.annotation_spec_set_path(
project, annotation_spec_set
)
assert expected == actual
def test_parse_annotation_spec_set_path():
expected = {
"project": "winkle",
"annotation_spec_set": "nautilus",
}
path = DataLabelingServiceClient.annotation_spec_set_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_annotation_spec_set_path(path)
assert expected == actual
def test_data_item_path():
project = "scallop"
dataset = "abalone"
data_item = "squid"
expected = "projects/{project}/datasets/{dataset}/dataItems/{data_item}".format(
project=project, dataset=dataset, data_item=data_item,
)
actual = DataLabelingServiceClient.data_item_path(project, dataset, data_item)
assert expected == actual
def test_parse_data_item_path():
expected = {
"project": "clam",
"dataset": "whelk",
"data_item": "octopus",
}
path = DataLabelingServiceClient.data_item_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_data_item_path(path)
assert expected == actual
def test_dataset_path():
project = "oyster"
dataset = "nudibranch"
expected = "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)
actual = DataLabelingServiceClient.dataset_path(project, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
"project": "cuttlefish",
"dataset": "mussel",
}
path = DataLabelingServiceClient.dataset_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_dataset_path(path)
assert expected == actual
def test_evaluation_path():
project = "winkle"
dataset = "nautilus"
evaluation = "scallop"
expected = "projects/{project}/datasets/{dataset}/evaluations/{evaluation}".format(
project=project, dataset=dataset, evaluation=evaluation,
)
actual = DataLabelingServiceClient.evaluation_path(project, dataset, evaluation)
assert expected == actual
def test_parse_evaluation_path():
expected = {
"project": "abalone",
"dataset": "squid",
"evaluation": "clam",
}
path = DataLabelingServiceClient.evaluation_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_evaluation_path(path)
assert expected == actual
def test_evaluation_job_path():
project = "whelk"
evaluation_job = "octopus"
expected = "projects/{project}/evaluationJobs/{evaluation_job}".format(
project=project, evaluation_job=evaluation_job,
)
actual = DataLabelingServiceClient.evaluation_job_path(project, evaluation_job)
assert expected == actual
def test_parse_evaluation_job_path():
expected = {
"project": "oyster",
"evaluation_job": "nudibranch",
}
path = DataLabelingServiceClient.evaluation_job_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_evaluation_job_path(path)
assert expected == actual
def test_example_path():
project = "cuttlefish"
dataset = "mussel"
annotated_dataset = "winkle"
example = "nautilus"
expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}/examples/{example}".format(
project=project,
dataset=dataset,
annotated_dataset=annotated_dataset,
example=example,
)
actual = DataLabelingServiceClient.example_path(
project, dataset, annotated_dataset, example
)
assert expected == actual
def test_parse_example_path():
expected = {
"project": "scallop",
"dataset": "abalone",
"annotated_dataset": "squid",
"example": "clam",
}
path = DataLabelingServiceClient.example_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_example_path(path)
assert expected == actual
def test_instruction_path():
project = "whelk"
instruction = "octopus"
expected = "projects/{project}/instructions/{instruction}".format(
project=project, instruction=instruction,
)
actual = DataLabelingServiceClient.instruction_path(project, instruction)
assert expected == actual
def test_parse_instruction_path():
expected = {
"project": "oyster",
"instruction": "nudibranch",
}
path = DataLabelingServiceClient.instruction_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_instruction_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DataLabelingServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = DataLabelingServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = DataLabelingServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = DataLabelingServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = DataLabelingServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = DataLabelingServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = DataLabelingServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = DataLabelingServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DataLabelingServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = DataLabelingServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DataLabelingServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DataLabelingServiceTransport, "_prep_wrapped_messages"
) as prep:
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DataLabelingServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DataLabelingServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DataLabelingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = DataLabelingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(DataLabelingServiceClient, transports.DataLabelingServiceGrpcTransport),
(
DataLabelingServiceAsyncClient,
transports.DataLabelingServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
googleapis/python-datalabeling | tests/unit/gapic/datalabeling_v1beta1/test_data_labeling_service.py | Python | apache-2.0 | 417,398 | ["Octopus"] | 1c85054294524d682bd88ec694948ea33367ad2724cf6030ebae8e87ca4a224b
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Auxiliary scripts for generating Makefiles
# and Visual Studio project files.
#
# Author: Leonardo de Moura (leonardo)
############################################
import sys
import os
import glob
import re
import getopt
import shutil
from mk_exception import *
from fnmatch import fnmatch
import distutils.sysconfig
import compileall
import subprocess
import string
def getenv(name, default):
try:
return os.environ[name].strip(' "\'')
except:
return default
CXX=getenv("CXX", None)
CC=getenv("CC", None)
CPPFLAGS=getenv("CPPFLAGS", "")
CXXFLAGS=getenv("CXXFLAGS", "")
EXAMP_DEBUG_FLAG=''
LDFLAGS=getenv("LDFLAGS", "")
JNI_HOME=getenv("JNI_HOME", None)
OCAMLC=getenv("OCAMLC", "ocamlc")
OCAMLOPT=getenv("OCAMLOPT", "ocamlopt")
OCAML_LIB=getenv("OCAML_LIB", None)
OCAMLFIND=getenv("OCAMLFIND", "ocamlfind")
CXX_COMPILERS=['g++', 'clang++']
C_COMPILERS=['gcc', 'clang']
JAVAC=None
JAR=None
PYTHON_PACKAGE_DIR=distutils.sysconfig.get_python_lib()
BUILD_DIR='build'
REV_BUILD_DIR='..'
SRC_DIR='src'
EXAMPLE_DIR='examples'
# Required Components
Z3_DLL_COMPONENT='api_dll'
PATTERN_COMPONENT='pattern'
UTIL_COMPONENT='util'
API_COMPONENT='api'
DOTNET_COMPONENT='dotnet'
JAVA_COMPONENT='java'
ML_COMPONENT='ml'
CPP_COMPONENT='cpp'
#####################
IS_WINDOWS=False
IS_LINUX=False
IS_OSX=False
IS_FREEBSD=False
VERBOSE=True
DEBUG_MODE=False
SHOW_CPPS = True
VS_X64 = False
LINUX_X64 = True
ONLY_MAKEFILES = False
Z3PY_SRC_DIR=None
VS_PROJ = False
TRACE = False
DOTNET_ENABLED=False
JAVA_ENABLED=False
ML_ENABLED=False
STATIC_LIB=False
VER_MAJOR=None
VER_MINOR=None
VER_BUILD=None
VER_REVISION=None
PREFIX=os.path.split(os.path.split(os.path.split(PYTHON_PACKAGE_DIR)[0])[0])[0]
GMP=False
FOCI2=False
FOCI2LIB=''
VS_PAR=False
VS_PAR_NUM=8
GPROF=False
GIT_HASH=False
def check_output(cmd):
return str(subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]).rstrip('\r\n')
def git_hash():
try:
branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
r = check_output(['git', 'show-ref', '--abbrev=12', 'refs/heads/%s' % branch])
except:
raise MKException("Failed to retrieve git hash")
ls = r.split(' ')
if len(ls) != 2:
raise MKException("Unexpected git output")
return ls[0]
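# For reference, `git show-ref --abbrev=12 refs/heads/<branch>` prints a line of
# the form "<12-char-hash> refs/heads/<branch>", so splitting on a single space
# and taking ls[0] leaves just the abbreviated commit hash.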
def is_windows():
return IS_WINDOWS
def is_linux():
return IS_LINUX
def is_freebsd():
return IS_FREEBSD
def is_osx():
return IS_OSX
def norm_path(p):
# We use '/' on mk_project for convenience
return os.path.join(*(p.split('/')))
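# Example: norm_path('api/python') returns os.path.join('api', 'python'), i.e.
# 'api/python' on POSIX systems and 'api\\python' on Windows.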
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in getenv("PATH", "").split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
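# Note: like the shell `which`, this returns the path of the first matching
# executable found on PATH; it implicitly returns None when nothing is found.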
class TempFile:
def __init__(self, name):
try:
self.name = name
self.fname = open(name, 'w')
except:
raise MKException("Failed to create temporary file '%s'" % self.name)
def add(self, s):
self.fname.write(s)
def commit(self):
self.fname.close()
def __del__(self):
self.fname.close()
try:
os.remove(self.name)
except:
pass
def exec_cmd(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
new_cmd = []
first = True
for e in cmd:
if first:
first = False
new_cmd.append(e)
else:
if e != "":
se = e.split(' ')
if len(se) > 1:
for e2 in se:
if e2 != "":
new_cmd.append(e2)
else:
new_cmd.append(e)
cmd = new_cmd
null = open(os.devnull, 'wb')
try:
return subprocess.call(cmd, stdout=null, stderr=null)
except:
# Failed to create process
return 1
# rm -f fname
def rmf(fname):
if os.path.exists(fname):
os.remove(fname)
def exec_compiler_cmd(cmd):
r = exec_cmd(cmd)
rmf('a.out')
return r
def test_cxx_compiler(cc):
if is_verbose():
print("Testing %s..." % cc)
t = TempFile('tst.cpp')
t.add('#include<iostream>\nint main() { return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, CXXFLAGS, 'tst.cpp', LDFLAGS]) == 0
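# The probe above is roughly equivalent to running, e.g.:
#     g++ $CPPFLAGS $CXXFLAGS tst.cpp $LDFLAGS
# and checking that the exit status is 0; the temporary tst.cpp and any a.out
# produced are removed afterwards.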
def test_c_compiler(cc):
if is_verbose():
print("Testing %s..." % cc)
t = TempFile('tst.c')
t.add('#include<stdio.h>\nint main() { return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, 'tst.c', LDFLAGS]) == 0
def test_gmp(cc):
if is_verbose():
print("Testing GMP...")
t = TempFile('tstgmp.cpp')
t.add('#include<gmp.h>\nint main() { mpz_t t; mpz_init(t); mpz_clear(t); return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, 'tstgmp.cpp', LDFLAGS, '-lgmp']) == 0
def test_foci2(cc,foci2lib):
if is_verbose():
print("Testing FOCI2...")
t = TempFile('tstfoci2.cpp')
t.add('#include<foci2.h>\nint main() { foci2 *f = foci2::create("lia"); return 0; }\n')
t.commit()
return exec_compiler_cmd([cc, CPPFLAGS, '-Isrc/interp', 'tstfoci2.cpp', LDFLAGS, foci2lib]) == 0
def test_openmp(cc):
if is_verbose():
print("Testing OpenMP...")
t = TempFile('tstomp.cpp')
t.add('#include<omp.h>\nint main() { return omp_in_parallel() ? 1 : 0; }\n')
t.commit()
if IS_WINDOWS:
r = exec_compiler_cmd([cc, CPPFLAGS, 'tstomp.cpp', LDFLAGS, '/openmp']) == 0
try:
rmf('tstomp.obj')
rmf('tstomp.exe')
except:
pass
return r
else:
return exec_compiler_cmd([cc, CPPFLAGS, 'tstomp.cpp', LDFLAGS, '-fopenmp']) == 0
def find_jni_h(path):
for root, dirs, files in os.walk(path):
for f in files:
if f == 'jni.h':
return root
return False
def check_java():
global JNI_HOME
global JAVAC
global JAR
JDK_HOME = getenv('JDK_HOME', None) # we only need to check this locally.
if is_verbose():
print("Finding javac ...")
if JDK_HOME != None:
if IS_WINDOWS:
JAVAC = os.path.join(JDK_HOME, 'bin', 'javac.exe')
else:
JAVAC = os.path.join(JDK_HOME, 'bin', 'javac')
if not os.path.exists(JAVAC):
raise MKException("Failed to detect javac at '%s/bin'; the environment variable JDK_HOME is probably set to the wrong path." % os.path.join(JDK_HOME))
else:
# Search for javac in the path.
        ind = 'javac'
if IS_WINDOWS:
ind = ind + '.exe'
paths = os.getenv('PATH', None)
if paths:
spaths = paths.split(os.pathsep)
for i in range(0, len(spaths)):
cmb = os.path.join(spaths[i], ind)
if os.path.exists(cmb):
JAVAC = cmb
break
if JAVAC == None:
raise MKException('No java compiler in the path, please adjust your PATH or set JDK_HOME to the location of the JDK.')
if is_verbose():
print("Finding jar ...")
if IS_WINDOWS:
JAR = os.path.join(os.path.dirname(JAVAC), 'jar.exe')
else:
JAR = os.path.join(os.path.dirname(JAVAC), 'jar')
if not os.path.exists(JAR):
raise MKException("Failed to detect jar at '%s'; the environment variable JDK_HOME is probably set to the wrong path." % os.path.join(JDK_HOME))
if is_verbose():
print("Testing %s..." % JAVAC)
t = TempFile('Hello.java')
t.add('public class Hello { public static void main(String[] args) { System.out.println("Hello, World"); }}\n')
t.commit()
oo = TempFile('output')
eo = TempFile('errout')
try:
subprocess.call([JAVAC, 'Hello.java', '-verbose'], stdout=oo.fname, stderr=eo.fname)
oo.commit()
eo.commit()
except:
raise MKException('Found, but failed to run Java compiler at %s' % (JAVAC))
os.remove('Hello.class')
if is_verbose():
print("Finding jni.h...")
if JNI_HOME != None:
if not os.path.exists(os.path.join(JNI_HOME, 'jni.h')):
raise MKException("Failed to detect jni.h '%s'; the environment variable JNI_HOME is probably set to the wrong path." % os.path.join(JNI_HOME))
else:
# Search for jni.h in the library directories...
t = open('errout', 'r')
        open_pat = re.compile(r"\[search path for class files: (.*)\]")
cdirs = []
for line in t:
m = open_pat.match(line)
if m:
libdirs = m.group(1).split(',')
for libdir in libdirs:
q = os.path.dirname(libdir)
if cdirs.count(q) == 0:
cdirs.append(q)
# ... plus some heuristic ones.
extra_dirs = []
# For the libraries, even the JDK usually uses a JRE that comes with it. To find the
# headers we have to go a little bit higher up.
for dir in cdirs:
extra_dirs.append(os.path.abspath(os.path.join(dir, '..')))
if IS_OSX: # Apparently Apple knows best where to put stuff...
extra_dirs.append('/System/Library/Frameworks/JavaVM.framework/Headers/')
cdirs[len(cdirs):] = extra_dirs
for dir in cdirs:
q = find_jni_h(dir)
if q != False:
JNI_HOME = q
if JNI_HOME == None:
raise MKException("Failed to detect jni.h. Possible solution: set JNI_HOME with the path to JDK.")
def check_ml():
t = TempFile('hello.ml')
t.add('print_string "Hello world!\n";;')
t.commit()
if is_verbose():
print ('Testing %s...' % OCAMLC)
r = exec_cmd([OCAMLC, '-o', 'a.out', 'hello.ml'])
if r != 0:
raise MKException('Failed testing ocamlc compiler. Set environment variable OCAMLC with the path to the Ocaml compiler')
if is_verbose():
print ('Testing %s...' % OCAMLOPT)
r = exec_cmd([OCAMLOPT, '-o', 'a.out', 'hello.ml'])
if r != 0:
raise MKException('Failed testing ocamlopt compiler. Set environment variable OCAMLOPT with the path to the Ocaml native compiler. Note that ocamlopt may require flexlink to be in your path.')
try:
rmf('hello.cmi')
rmf('hello.cmo')
rmf('hello.cmx')
rmf('a.out')
rmf('hello.o')
except:
pass
find_ml_lib()
find_ocaml_find()
def find_ocaml_find():
global OCAMLFIND
if is_verbose():
print ("Testing %s..." % OCAMLFIND)
r = exec_cmd([OCAMLFIND, 'printconf'])
if r != 0:
OCAMLFIND=''
def find_ml_lib():
global OCAML_LIB
if is_verbose():
print ('Finding OCAML_LIB...')
t = TempFile('output')
null = open(os.devnull, 'wb')
try:
subprocess.call([OCAMLC, '-where'], stdout=t.fname, stderr=null)
t.commit()
except:
raise MKException('Failed to find Ocaml library; please set OCAML_LIB')
t = open('output', 'r')
for line in t:
OCAML_LIB = line[:-1]
if is_verbose():
print ('OCAML_LIB=%s' % OCAML_LIB)
t.close()
rmf('output')
return
def is64():
global LINUX_X64
return LINUX_X64 and sys.maxsize >= 2**32
def check_ar():
if is_verbose():
print("Testing ar...")
if which('ar')== None:
raise MKException('ar (archive tool) was not found')
def find_cxx_compiler():
global CXX, CXX_COMPILERS
if CXX != None:
if test_cxx_compiler(CXX):
return CXX
for cxx in CXX_COMPILERS:
if test_cxx_compiler(cxx):
CXX = cxx
return CXX
raise MKException('C++ compiler was not found. Try to set the environment variable CXX with the C++ compiler available in your system.')
def find_c_compiler():
global CC, C_COMPILERS
if CC != None:
if test_c_compiler(CC):
return CC
for c in C_COMPILERS:
if test_c_compiler(c):
CC = c
return CC
raise MKException('C compiler was not found. Try to set the environment variable CC with the C compiler available in your system.')
def set_version(major, minor, build, revision):
global VER_MAJOR, VER_MINOR, VER_BUILD, VER_REVISION
VER_MAJOR = major
VER_MINOR = minor
VER_BUILD = build
VER_REVISION = revision
def get_version():
return (VER_MAJOR, VER_MINOR, VER_BUILD, VER_REVISION)
def build_static_lib():
return STATIC_LIB
def is_cr_lf(fname):
# Check whether text files use cr/lf
f = open(fname, 'r')
line = f.readline()
sz = len(line)
return sz >= 2 and line[sz-2] == '\r' and line[sz-1] == '\n'
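# Note: only the first line of the file is sampled, which is sufficient here
# because a text file normally uses a single end-of-line convention throughout.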
# dos2unix in python
# cr/lf --> lf
def dos2unix(fname):
if is_cr_lf(fname):
fin = open(fname, 'r')
fname_new = '%s.new' % fname
fout = open(fname_new, 'w')
for line in fin:
line = line.rstrip('\r\n')
fout.write(line)
fout.write('\n')
fin.close()
fout.close()
shutil.move(fname_new, fname)
if is_verbose():
print("dos2unix '%s'" % fname)
def dos2unix_tree_core(pattern, dir, files):
for filename in files:
if fnmatch(filename, pattern):
fname = os.path.join(dir, filename)
if not os.path.isdir(fname):
dos2unix(fname)
def dos2unix_tree():
    for root, dirs, files in os.walk('src'):
        dos2unix_tree_core('*', root, files)
def check_eol():
if not IS_WINDOWS:
# Linux/OSX/BSD check if the end-of-line is cr/lf
if is_cr_lf('LICENSE.txt'):
if is_verbose():
print("Fixing end of line...")
dos2unix_tree()
if os.name == 'nt':
IS_WINDOWS=True
# Visual Studio already displays the files being compiled
SHOW_CPPS=False
# Enable .Net bindings by default on windows
DOTNET_ENABLED=True
elif os.name == 'posix':
if os.uname()[0] == 'Darwin':
IS_OSX=True
elif os.uname()[0] == 'Linux':
IS_LINUX=True
elif os.uname()[0] == 'FreeBSD':
IS_FREEBSD=True
def display_help(exit_code):
print("mk_make.py: Z3 Makefile generator\n")
print("This script generates the Makefile for the Z3 theorem prover.")
print("It must be executed from the Z3 root directory.")
print("\nOptions:")
print(" -h, --help display this message.")
print(" -s, --silent do not print verbose messages.")
if not IS_WINDOWS:
print(" -p <dir>, --prefix=<dir> installation prefix (default: %s)." % PREFIX)
else:
print(" --parallel=num use cl option /MP with 'num' parallel processes")
print(" -b <sudir>, --build=<subdir> subdirectory where Z3 will be built (default: build).")
print(" --githash=hash include the given hash in the binaries.")
print(" -d, --debug compile Z3 in debug mode.")
print(" -t, --trace enable tracing in release mode.")
if IS_WINDOWS:
print(" -x, --x64 create 64 binary when using Visual Studio.")
else:
print(" --x86 force 32-bit x86 build on x64 systems.")
print(" -m, --makefiles generate only makefiles.")
if IS_WINDOWS:
print(" -v, --vsproj generate Visual Studio Project Files.")
if IS_WINDOWS:
print(" -n, --nodotnet do not generate Microsoft.Z3.dll make rules.")
print(" -j, --java generate Java bindings.")
print(" --ml generate OCaml bindings.")
print(" --staticlib build Z3 static library.")
if not IS_WINDOWS:
print(" -g, --gmp use GMP.")
print(" --gprof enable gprof")
print(" -f <path> --foci2=<path> use foci2 library at path")
print("")
print("Some influential environment variables:")
if not IS_WINDOWS:
print(" CXX C++ compiler")
print(" CC C compiler")
print(" LDFLAGS Linker flags, e.g., -L<lib dir> if you have libraries in a non-standard directory")
print(" CPPFLAGS Preprocessor flags, e.g., -I<include dir> if you have header files in a non-standard directory")
print(" CXXFLAGS C++ compiler flags")
print(" JDK_HOME JDK installation directory (only relevant if -j or --java option is provided)")
print(" JNI_HOME JNI bindings directory (only relevant if -j or --java option is provided)")
print(" OCAMLC Ocaml byte-code compiler (only relevant with --ml)")
print(" OCAMLOPT Ocaml native compiler (only relevant with --ml)")
print(" OCAML_LIB Ocaml library directory (only relevant with --ml)")
exit(exit_code)
# Parse configuration option for mk_make script
def parse_options():
global VERBOSE, DEBUG_MODE, IS_WINDOWS, VS_X64, ONLY_MAKEFILES, SHOW_CPPS, VS_PROJ, TRACE, VS_PAR, VS_PAR_NUM
global DOTNET_ENABLED, JAVA_ENABLED, ML_ENABLED, STATIC_LIB, PREFIX, GMP, FOCI2, FOCI2LIB, PYTHON_PACKAGE_DIR, GPROF, GIT_HASH
global LINUX_X64
try:
options, remainder = getopt.gnu_getopt(sys.argv[1:],
'b:df:sxhmcvtnp:gj',
['build=', 'debug', 'silent', 'x64', 'help', 'makefiles', 'showcpp', 'vsproj',
'trace', 'nodotnet', 'staticlib', 'prefix=', 'gmp', 'foci2=', 'java', 'parallel=', 'gprof',
'githash=', 'x86', 'ml'])
except:
print("ERROR: Invalid command line option")
display_help(1)
for opt, arg in options:
print('opt = %s, arg = %s' % (opt, arg))
if opt in ('-b', '--build'):
if arg == 'src':
raise MKException('The src directory should not be used to host the Makefile')
set_build_dir(arg)
elif opt in ('-s', '--silent'):
VERBOSE = False
elif opt in ('-d', '--debug'):
DEBUG_MODE = True
elif opt in ('-x', '--x64'):
if not IS_WINDOWS:
raise MKException('x64 compilation mode can only be specified when using Visual Studio')
VS_X64 = True
        elif opt == '--x86':
LINUX_X64=False
elif opt in ('-h', '--help'):
display_help(0)
elif opt in ('-m', '--makefiles'):
ONLY_MAKEFILES = True
elif opt in ('-c', '--showcpp'):
SHOW_CPPS = True
elif opt in ('-v', '--vsproj'):
VS_PROJ = True
elif opt in ('-t', '--trace'):
TRACE = True
elif opt in ('-n', '--nodotnet'):
DOTNET_ENABLED = False
        elif opt == '--staticlib':
STATIC_LIB = True
elif not IS_WINDOWS and opt in ('-p', '--prefix'):
PREFIX = arg
PYTHON_PACKAGE_DIR = os.path.join(PREFIX, 'lib', 'python%s' % distutils.sysconfig.get_python_version(), 'dist-packages')
mk_dir(PYTHON_PACKAGE_DIR)
if sys.version >= "3":
mk_dir(os.path.join(PYTHON_PACKAGE_DIR, '__pycache__'))
elif IS_WINDOWS and opt == '--parallel':
VS_PAR = True
VS_PAR_NUM = int(arg)
elif opt in ('-g', '--gmp'):
GMP = True
elif opt in ('-f', '--foci2'):
FOCI2 = True
FOCI2LIB = arg
elif opt in ('-j', '--java'):
JAVA_ENABLED = True
elif opt == '--gprof':
GPROF = True
elif opt == '--githash':
GIT_HASH=arg
        elif opt == '--ml':
ML_ENABLED = True
else:
print("ERROR: Invalid command line option '%s'" % opt)
display_help(1)
# Return a list containing the file names included via '#include' in
# the given C/C++ file named fname.
def extract_c_includes(fname):
result = []
    # We look for well-behaved #include directives
    std_inc_pat = re.compile(r'[ \t]*#include[ \t]*"(.*)"[ \t]*')
    system_inc_pat = re.compile(r'[ \t]*#include[ \t]*<.*>[ \t]*')
    # We should generate an error for any occurrence of #include that does not match the previous patterns.
    non_std_inc_pat = re.compile(".*#include.*")
f = open(fname, 'r')
linenum = 1
for line in f:
m1 = std_inc_pat.match(line)
if m1:
result.append(m1.group(1))
elif not system_inc_pat.match(line) and non_std_inc_pat.match(line):
raise MKException("Invalid #include directive at '%s':%s" % (fname, line))
linenum = linenum + 1
return result
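# For example (illustrative), a file containing '#include "util.h"' and
# '#include <vector>' yields ['util.h']; system includes are skipped and any
# other occurrence of #include raises MKException.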
# Given a path dir1/subdir2/subdir3 returns ../../..
def reverse_path(p):
l = p.split(os.sep)
n = len(l)
r = '..'
for i in range(1, n):
r = os.path.join(r, '..')
return r
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
def set_build_dir(d):
global BUILD_DIR, REV_BUILD_DIR
BUILD_DIR = norm_path(d)
REV_BUILD_DIR = reverse_path(d)
def set_z3py_dir(p):
global SRC_DIR, Z3PY_SRC_DIR
p = norm_path(p)
full = os.path.join(SRC_DIR, p)
if not os.path.exists(full):
raise MKException("Python bindings directory '%s' does not exist" % full)
Z3PY_SRC_DIR = full
if VERBOSE:
print("Python bindings directory was detected.")
_UNIQ_ID = 0
def mk_fresh_name(prefix):
global _UNIQ_ID
r = '%s_%s' % (prefix, _UNIQ_ID)
_UNIQ_ID = _UNIQ_ID + 1
return r
_Id = 0
_Components = []
_ComponentNames = set()
_Name2Component = {}
_Processed_Headers = set()
# Return the Component object named name
def get_component(name):
return _Name2Component[name]
def get_components():
return _Components
# Return the directory where the python bindings are located.
def get_z3py_dir():
return Z3PY_SRC_DIR
# Return true if in verbose mode
def is_verbose():
return VERBOSE
def is_java_enabled():
return JAVA_ENABLED
def is_ml_enabled():
return ML_ENABLED
def is_compiler(given, expected):
"""
Return True if the 'given' compiler is the expected one.
>>> is_compiler('g++', 'g++')
True
>>> is_compiler('/home/g++', 'g++')
True
>>> is_compiler(os.path.join('home', 'g++'), 'g++')
True
>>> is_compiler('clang++', 'g++')
False
>>> is_compiler(os.path.join('home', 'clang++'), 'clang++')
True
"""
if given == expected:
return True
if len(expected) < len(given):
return given[len(given) - len(expected) - 1] == os.sep and given[len(given) - len(expected):] == expected
return False
def is_CXX_gpp():
return is_compiler(CXX, 'g++')
def is_clang_in_gpp_form(cc):
version_string = check_output([cc, '--version'])
return str(version_string).find('clang') != -1
def is_CXX_clangpp():
if is_compiler(CXX, 'g++'):
return is_clang_in_gpp_form(CXX)
return is_compiler(CXX, 'clang++')
def get_cpp_files(path):
return filter(lambda f: f.endswith('.cpp'), os.listdir(path))
def get_c_files(path):
return filter(lambda f: f.endswith('.c'), os.listdir(path))
def get_cs_files(path):
return filter(lambda f: f.endswith('.cs'), os.listdir(path))
def get_java_files(path):
return filter(lambda f: f.endswith('.java'), os.listdir(path))
def get_ml_files(path):
return filter(lambda f: f.endswith('.ml'), os.listdir(path))
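# Compute the dependency closure for a new component. Each already-registered
# component stores its own transitive closure in .deps, so expanding one level
# of dependencies here is sufficient.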
def find_all_deps(name, deps):
new_deps = []
for dep in deps:
if dep in _ComponentNames:
if not (dep in new_deps):
new_deps.append(dep)
for dep_dep in get_component(dep).deps:
if not (dep_dep in new_deps):
new_deps.append(dep_dep)
else:
raise MKException("Unknown component '%s' at '%s'." % (dep, name))
return new_deps
class Component:
def __init__(self, name, path, deps):
global BUILD_DIR, SRC_DIR, REV_BUILD_DIR
if name in _ComponentNames:
raise MKException("Component '%s' was already defined." % name)
if path == None:
path = name
self.name = name
path = norm_path(path)
self.path = path
self.deps = find_all_deps(name, deps)
self.build_dir = path
self.src_dir = os.path.join(SRC_DIR, path)
self.to_src_dir = os.path.join(REV_BUILD_DIR, self.src_dir)
def get_link_name(self):
return os.path.join(self.build_dir, self.name) + '$(LIB_EXT)'
# Find fname in the include paths for the given component.
# ownerfile is only used for creating error messages.
# That is, we were looking for fname when processing ownerfile
def find_file(self, fname, ownerfile):
full_fname = os.path.join(self.src_dir, fname)
if os.path.exists(full_fname):
return self
for dep in self.deps:
c_dep = get_component(dep)
full_fname = os.path.join(c_dep.src_dir, fname)
if os.path.exists(full_fname):
return c_dep
raise MKException("Failed to find include file '%s' for '%s' when processing '%s'." % (fname, ownerfile, self.name))
    # Write all dependencies of the file basename located in this component's directory.
    # The result is written to out.
def add_cpp_h_deps(self, out, basename):
includes = extract_c_includes(os.path.join(self.src_dir, basename))
out.write(os.path.join(self.to_src_dir, basename))
for include in includes:
owner = self.find_file(include, basename)
out.write(' %s.node' % os.path.join(owner.build_dir, include))
# Add a rule for each #include directive in the file basename located at the current component.
def add_rule_for_each_include(self, out, basename):
fullname = os.path.join(self.src_dir, basename)
includes = extract_c_includes(fullname)
for include in includes:
owner = self.find_file(include, fullname)
owner.add_h_rule(out, include)
# Display a Makefile rule for an include file located in the given component directory.
# 'include' is something of the form: ast.h, polynomial.h
# The rule displayed at out is of the form
# ast/ast_pp.h.node : ../src/util/ast_pp.h util/util.h.node ast/ast.h.node
# @echo "done" > ast/ast_pp.h.node
def add_h_rule(self, out, include):
include_src_path = os.path.join(self.to_src_dir, include)
if include_src_path in _Processed_Headers:
return
_Processed_Headers.add(include_src_path)
self.add_rule_for_each_include(out, include)
include_node = '%s.node' % os.path.join(self.build_dir, include)
out.write('%s: ' % include_node)
self.add_cpp_h_deps(out, include)
out.write('\n')
out.write('\t@echo done > %s\n' % include_node)
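    # Emit the compile rule for a single .cpp file of this component. The
    # generated rule has roughly the following shape (illustrative):
    #   util/debug$(OBJ_EXT): ../src/util/debug.cpp util/debug.h.node ...
    #   <TAB>@$(CXX) $(CXXFLAGS) $(includes_0) $(CXX_OUT_FLAG)util/debug$(OBJ_EXT) ../src/util/debug.cpp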
def add_cpp_rules(self, out, include_defs, cppfile):
self.add_rule_for_each_include(out, cppfile)
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
srcfile = os.path.join(self.to_src_dir, cppfile)
out.write('%s: ' % objfile)
self.add_cpp_h_deps(out, cppfile)
out.write('\n')
if SHOW_CPPS:
out.write('\t@echo %s\n' % os.path.join(self.src_dir, cppfile))
out.write('\t@$(CXX) $(CXXFLAGS) $(%s) $(CXX_OUT_FLAG)%s %s\n' % (include_defs, objfile, srcfile))
def mk_makefile(self, out):
include_defs = mk_fresh_name('includes')
out.write('%s =' % include_defs)
for dep in self.deps:
out.write(' -I%s' % get_component(dep).to_src_dir)
out.write('\n')
mk_dir(os.path.join(BUILD_DIR, self.build_dir))
if VS_PAR and IS_WINDOWS:
cppfiles = list(get_cpp_files(self.src_dir))
dependencies = set()
for cppfile in cppfiles:
dependencies.add(os.path.join(self.to_src_dir, cppfile))
self.add_rule_for_each_include(out, cppfile)
includes = extract_c_includes(os.path.join(self.src_dir, cppfile))
for include in includes:
owner = self.find_file(include, cppfile)
dependencies.add('%s.node' % os.path.join(owner.build_dir, include))
for cppfile in cppfiles:
out.write('%s$(OBJ_EXT) ' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0]))
out.write(': ')
for dep in dependencies:
out.write(dep)
out.write(' ')
out.write('\n')
out.write('\t@$(CXX) $(CXXFLAGS) /MP%s $(%s)' % (VS_PAR_NUM, include_defs))
for cppfile in cppfiles:
out.write(' ')
out.write(os.path.join(self.to_src_dir, cppfile))
out.write('\n')
out.write('\tmove *.obj %s\n' % self.build_dir)
else:
for cppfile in get_cpp_files(self.src_dir):
self.add_cpp_rules(out, include_defs, cppfile)
# Return true if the component should be included in the all: rule
def main_component(self):
return False
# Return true if the component contains an AssemblyInfo.cs file that needs to be updated.
def has_assembly_info(self):
return False
    # Return true if the component needs the build system to generate an install_tactics.cpp file
def require_install_tactics(self):
return False
# Return true if the component needs a def file
def require_def_file(self):
return False
    # Return true if the component needs the build system to generate a mem_initializer.cpp file with mem_initialize() and mem_finalize() functions.
def require_mem_initializer(self):
return False
def mk_install_deps(self, out):
return
def mk_install(self, out):
return
def mk_uninstall(self, out):
return
def is_example(self):
return False
# Invoked when creating a (windows) distribution package using components at build_path, and
# storing them at dist_path
def mk_win_dist(self, build_path, dist_path):
return
def mk_unix_dist(self, build_path, dist_path):
return
class LibComponent(Component):
def __init__(self, name, path, deps, includes2install):
Component.__init__(self, name, path, deps)
self.includes2install = includes2install
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for lib
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
libfile = '%s$(LIB_EXT)' % os.path.join(self.build_dir, self.name)
out.write('%s:' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('\t@$(AR) $(AR_FLAGS) $(AR_OUTFLAG)%s' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('%s: %s\n\n' % (self.name, libfile))
    def mk_install_dep(self, out):
        libfile = '%s$(LIB_EXT)' % os.path.join(self.build_dir, self.name)
        out.write('%s' % libfile)
def mk_install(self, out):
for include in self.includes2install:
out.write('\t@cp %s %s\n' % (os.path.join(self.to_src_dir, include), os.path.join('$(PREFIX)', 'include', include)))
def mk_uninstall(self, out):
for include in self.includes2install:
out.write('\t@rm -f %s\n' % os.path.join('$(PREFIX)', 'include', include))
def mk_win_dist(self, build_path, dist_path):
mk_dir(os.path.join(dist_path, 'include'))
for include in self.includes2install:
shutil.copy(os.path.join(self.src_dir, include),
os.path.join(dist_path, 'include', include))
def mk_unix_dist(self, build_path, dist_path):
self.mk_win_dist(build_path, dist_path)
# "Library" containing only .h files. This is just a placeholder for includes files to be installed.
class HLibComponent(LibComponent):
def __init__(self, name, path, includes2install):
LibComponent.__init__(self, name, path, [], includes2install)
def mk_makefile(self, out):
return
# Auxiliary comparator; note that sort_components below sorts by component id directly.
def comp_components(c1, c2):
id1 = get_component(c1).id
id2 = get_component(c2).id
return id2 - id1
# Sort components based on (reverse) definition time
def sort_components(cnames):
return sorted(cnames, key=lambda c: get_component(c).id, reverse=True)
class ExeComponent(Component):
def __init__(self, name, exe_name, path, deps, install):
Component.__init__(self, name, path, deps)
if exe_name == None:
exe_name = name
self.exe_name = exe_name
self.install = install
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for exe
exefile = '%s$(EXE_EXT)' % self.exe_name
out.write('%s:' % exefile)
deps = sort_components(self.deps)
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write('\n')
out.write('\t$(LINK) $(LINK_OUT_FLAG)%s $(LINK_FLAGS)' % exefile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write(' ' + FOCI2LIB)
out.write(' $(LINK_EXTRA_FLAGS)\n')
out.write('%s: %s\n\n' % (self.name, exefile))
def require_install_tactics(self):
return ('tactic' in self.deps) and ('cmd_context' in self.deps)
def require_mem_initializer(self):
return True
# All executables (to be installed) are included in the all: rule
def main_component(self):
return self.install
    def mk_install_dep(self, out):
        exefile = '%s$(EXE_EXT)' % self.exe_name
        out.write('%s' % exefile)
def mk_install(self, out):
if self.install:
exefile = '%s$(EXE_EXT)' % self.exe_name
out.write('\t@cp %s %s\n' % (exefile, os.path.join('$(PREFIX)', 'bin', exefile)))
def mk_uninstall(self, out):
exefile = '%s$(EXE_EXT)' % self.exe_name
out.write('\t@rm -f %s\n' % os.path.join('$(PREFIX)', 'bin', exefile))
def mk_win_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy('%s.exe' % os.path.join(build_path, self.exe_name),
'%s.exe' % os.path.join(dist_path, 'bin', self.exe_name))
def mk_unix_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy(os.path.join(build_path, self.exe_name),
os.path.join(dist_path, 'bin', self.exe_name))
class ExtraExeComponent(ExeComponent):
def __init__(self, name, exe_name, path, deps, install):
ExeComponent.__init__(self, name, exe_name, path, deps, install)
def main_component(self):
return False
def require_mem_initializer(self):
return False
def get_so_ext():
sysname = os.uname()[0]
if sysname == 'Darwin':
return 'dylib'
elif sysname == 'Linux' or sysname == 'FreeBSD':
return 'so'
elif sysname == 'CYGWIN':
return 'dll'
else:
assert(False)
return 'dll'
class DLLComponent(Component):
def __init__(self, name, dll_name, path, deps, export_files, reexports, install, static):
Component.__init__(self, name, path, deps)
if dll_name == None:
dll_name = name
self.dll_name = dll_name
self.export_files = export_files
self.reexports = reexports
self.install = install
self.static = static
def get_link_name(self):
if self.static:
return os.path.join(self.build_dir, self.name) + '$(LIB_EXT)'
else:
return self.name + '$(SO_EXT)'
def mk_makefile(self, out):
Component.mk_makefile(self, out)
# generate rule for (SO_EXT)
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('%s:' % dllfile)
deps = sort_components(self.deps)
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
# Explicitly include obj files of reexport. This fixes problems with exported symbols on Linux and OSX.
for reexport in self.reexports:
reexport = get_component(reexport)
for cppfile in get_cpp_files(reexport.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(reexport.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
if not dep in self.reexports:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write(' %s' % os.path.join(get_component('java').to_src_dir, 'Native.cpp'))
out.write('\n')
t = '\t$(CXX) $(CXXFLAGS) $(CXX_OUT_FLAG)api/java/Native$(OBJ_EXT) -I"%s" -I"%s/PLATFORM" -I%s %s/Native.cpp\n' % (JNI_HOME, JNI_HOME, get_component('api').to_src_dir, get_component('java').to_src_dir)
if IS_OSX:
t = t.replace('PLATFORM', 'darwin')
elif IS_LINUX:
t = t.replace('PLATFORM', 'linux')
elif IS_FREEBSD:
t = t.replace('PLATFORM', 'freebsd')
else:
t = t.replace('PLATFORM', 'win32')
out.write(t)
out.write('\t$(LINK) $(SLINK_OUT_FLAG)%s $(SLINK_FLAGS)' % dllfile)
for obj in objs:
out.write(' ')
out.write(obj)
for dep in deps:
if not dep in self.reexports:
c_dep = get_component(dep)
out.write(' ' + c_dep.get_link_name())
out.write(' %s$(OBJ_EXT)' % os.path.join('api', 'java', 'Native'))
out.write(' ' + FOCI2LIB)
out.write(' $(SLINK_EXTRA_FLAGS)')
if IS_WINDOWS:
out.write(' /DEF:%s.def' % os.path.join(self.to_src_dir, self.name))
out.write('\n')
if self.static:
self.mk_static(out)
libfile = '%s$(LIB_EXT)' % self.dll_name
out.write('%s: %s %s\n\n' % (self.name, dllfile, libfile))
else:
out.write('%s: %s\n\n' % (self.name, dllfile))
def mk_static(self, out):
# generate rule for lib
objs = []
for cppfile in get_cpp_files(self.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
# we have to "reexport" all object files
for dep in self.deps:
dep = get_component(dep)
for cppfile in get_cpp_files(dep.src_dir):
objfile = '%s$(OBJ_EXT)' % os.path.join(dep.build_dir, os.path.splitext(cppfile)[0])
objs.append(objfile)
libfile = '%s$(LIB_EXT)' % self.dll_name
out.write('%s:' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
out.write('\t@$(AR) $(AR_FLAGS) $(AR_OUTFLAG)%s' % libfile)
for obj in objs:
out.write(' ')
out.write(obj)
out.write('\n')
def main_component(self):
return self.install
def require_install_tactics(self):
return ('tactic' in self.deps) and ('cmd_context' in self.deps)
def require_mem_initializer(self):
return True
def require_def_file(self):
return IS_WINDOWS and self.export_files
def mk_install_dep(self, out):
out.write('%s$(SO_EXT)' % self.dll_name)
if self.static:
out.write(' %s$(LIB_EXT)' % self.dll_name)
def mk_install(self, out):
if self.install:
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('\t@cp %s %s\n' % (dllfile, os.path.join('$(PREFIX)', 'lib', dllfile)))
out.write('\t@cp %s %s\n' % (dllfile, os.path.join(PYTHON_PACKAGE_DIR, dllfile)))
if self.static:
libfile = '%s$(LIB_EXT)' % self.dll_name
out.write('\t@cp %s %s\n' % (libfile, os.path.join('$(PREFIX)', 'lib', libfile)))
def mk_uninstall(self, out):
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('\t@rm -f %s\n' % os.path.join('$(PREFIX)', 'lib', dllfile))
out.write('\t@rm -f %s\n' % os.path.join(PYTHON_PACKAGE_DIR, dllfile))
libfile = '%s$(LIB_EXT)' % self.dll_name
out.write('\t@rm -f %s\n' % os.path.join('$(PREFIX)', 'lib', libfile))
def mk_win_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy('%s.dll' % os.path.join(build_path, self.dll_name),
'%s.dll' % os.path.join(dist_path, 'bin', self.dll_name))
shutil.copy('%s.lib' % os.path.join(build_path, self.dll_name),
'%s.lib' % os.path.join(dist_path, 'bin', self.dll_name))
def mk_unix_dist(self, build_path, dist_path):
if self.install:
mk_dir(os.path.join(dist_path, 'bin'))
so = get_so_ext()
shutil.copy('%s.%s' % (os.path.join(build_path, self.dll_name), so),
'%s.%s' % (os.path.join(dist_path, 'bin', self.dll_name), so))
shutil.copy('%s.a' % os.path.join(build_path, self.dll_name),
'%s.a' % os.path.join(dist_path, 'bin', self.dll_name))
class DotNetDLLComponent(Component):
def __init__(self, name, dll_name, path, deps, assembly_info_dir):
Component.__init__(self, name, path, deps)
if dll_name == None:
dll_name = name
if assembly_info_dir == None:
assembly_info_dir = "."
self.dll_name = dll_name
self.assembly_info_dir = assembly_info_dir
def mk_makefile(self, out):
if DOTNET_ENABLED:
cs_fp_files = []
cs_files = []
for cs_file in get_cs_files(self.src_dir):
cs_fp_files.append(os.path.join(self.to_src_dir, cs_file))
cs_files.append(cs_file)
if self.assembly_info_dir != '.':
for cs_file in get_cs_files(os.path.join(self.src_dir, self.assembly_info_dir)):
cs_fp_files.append(os.path.join(self.to_src_dir, self.assembly_info_dir, cs_file))
cs_files.append(os.path.join(self.assembly_info_dir, cs_file))
dllfile = '%s.dll' % self.dll_name
out.write('%s: %s$(SO_EXT)' % (dllfile, get_component(Z3_DLL_COMPONENT).dll_name))
for cs_file in cs_fp_files:
out.write(' ')
out.write(cs_file)
out.write('\n')
out.write(' csc /noconfig /unsafe+ /nowarn:1701,1702 /nostdlib+ /errorreport:prompt /warn:4 /reference:mscorlib.dll /reference:System.Core.dll /reference:System.dll /reference:System.Numerics.dll /filealign:512 /linkresource:%s.dll /out:%s.dll /target:library /doc:%s.xml' % (get_component(Z3_DLL_COMPONENT).dll_name, self.dll_name, self.dll_name))
if DEBUG_MODE:
out.write(' /define:DEBUG;TRACE /debug+ /debug:full /optimize-')
else:
out.write(' /optimize+')
if VS_X64:
out.write(' /platform:x64')
else:
out.write(' /platform:x86')
for cs_file in cs_files:
out.write(' %s' % os.path.join(self.to_src_dir, cs_file))
out.write('\n')
out.write('%s: %s\n\n' % (self.name, dllfile))
return
def main_component(self):
return DOTNET_ENABLED
def has_assembly_info(self):
return True
def mk_win_dist(self, build_path, dist_path):
if DOTNET_ENABLED:
            # Assuming all .NET DLLs should be included in the distribution
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy('%s.dll' % os.path.join(build_path, self.dll_name),
'%s.dll' % os.path.join(dist_path, 'bin', self.dll_name))
shutil.copy('%s.xml' % os.path.join(build_path, self.dll_name),
'%s.xml' % os.path.join(dist_path, 'bin', self.dll_name))
if DEBUG_MODE:
shutil.copy('%s.pdb' % os.path.join(build_path, self.dll_name),
'%s.pdb' % os.path.join(dist_path, 'bin', self.dll_name))
def mk_unix_dist(self, build_path, dist_path):
# Do nothing
return
class JavaDLLComponent(Component):
def __init__(self, name, dll_name, package_name, manifest_file, path, deps):
Component.__init__(self, name, path, deps)
if dll_name == None:
dll_name = name
self.dll_name = dll_name
self.package_name = package_name
self.manifest_file = manifest_file
self.install = not is_windows()
def mk_makefile(self, out):
global JAVAC
global JAR
if is_java_enabled():
mk_dir(os.path.join(BUILD_DIR, 'api', 'java', 'classes'))
out.write('libz3java$(SO_EXT): libz3$(SO_EXT) %s\n' % os.path.join(self.to_src_dir, 'Native.cpp'))
out.write('\t')
if IS_WINDOWS:
out.write('@copy ')
else:
out.write('@cp ')
out.write('libz3$(SO_EXT) libz3java$(SO_EXT)\n\n')
out.write('%s.jar: libz3java$(SO_EXT) ' % self.package_name)
deps = ''
for jfile in get_java_files(self.src_dir):
deps += ('%s ' % os.path.join(self.to_src_dir, jfile))
for jfile in get_java_files(os.path.join(self.src_dir, "enumerations")):
deps += '%s ' % os.path.join(self.to_src_dir, 'enumerations', jfile)
out.write(deps)
out.write('\n')
            # Quote the tool paths so that paths containing spaces work (needed on Windows, harmless elsewhere).
JAVAC = '"%s"' % JAVAC
JAR = '"%s"' % JAR
t = ('\t%s %s.java -d %s\n' % (JAVAC, os.path.join(self.to_src_dir, 'enumerations', '*'), os.path.join('api', 'java', 'classes')))
out.write(t)
t = ('\t%s -cp %s %s.java -d %s\n' % (JAVAC,
os.path.join('api', 'java', 'classes'),
os.path.join(self.to_src_dir, '*'),
os.path.join('api', 'java', 'classes')))
out.write(t)
out.write('\t%s cfm %s.jar %s -C %s .\n' % (JAR, self.package_name,
os.path.join(self.to_src_dir, 'manifest'),
os.path.join('api', 'java', 'classes')))
out.write('java: %s.jar\n\n' % self.package_name)
def main_component(self):
return is_java_enabled()
def mk_win_dist(self, build_path, dist_path):
if JAVA_ENABLED:
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy('%s.jar' % os.path.join(build_path, self.package_name),
'%s.jar' % os.path.join(dist_path, 'bin', self.package_name))
shutil.copy(os.path.join(build_path, 'libz3java.dll'),
os.path.join(dist_path, 'bin', 'libz3java.dll'))
shutil.copy(os.path.join(build_path, 'libz3java.lib'),
os.path.join(dist_path, 'bin', 'libz3java.lib'))
def mk_unix_dist(self, build_path, dist_path):
if JAVA_ENABLED:
mk_dir(os.path.join(dist_path, 'bin'))
shutil.copy('%s.jar' % os.path.join(build_path, self.package_name),
'%s.jar' % os.path.join(dist_path, 'bin', self.package_name))
so = get_so_ext()
shutil.copy(os.path.join(build_path, 'libz3java.%s' % so),
os.path.join(dist_path, 'bin', 'libz3java.%s' % so))
def mk_install(self, out):
if is_java_enabled() and self.install:
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('\t@cp %s %s\n' % (dllfile, os.path.join('$(PREFIX)', 'lib', dllfile)))
out.write('\t@cp %s.jar %s.jar\n' % (self.package_name, os.path.join('$(PREFIX)', 'lib', self.package_name)))
def mk_uninstall(self, out):
if is_java_enabled() and self.install:
dllfile = '%s$(SO_EXT)' % self.dll_name
out.write('\t@rm %s\n' % (os.path.join('$(PREFIX)', 'lib', dllfile)))
out.write('\t@rm %s.jar\n' % (os.path.join('$(PREFIX)', 'lib', self.package_name)))
class MLComponent(Component):
def __init__(self, name, lib_name, path, deps):
Component.__init__(self, name, path, deps)
if lib_name == None:
lib_name = name
self.lib_name = lib_name
def mk_ml_meta(self, ml_meta_in, ml_meta_out, major, minor, build, revision):
ver_pat = re.compile('version = "VERSION"*')
fin = open(ml_meta_in, 'r')
fout = open(ml_meta_out, 'w')
num_updates = 0
for line in fin:
if ver_pat.match(line):
fout.write('version = "%s.%s.%s.%s"\n' % (major, minor, build, revision))
num_updates = num_updates + 1
else:
fout.write(line)
assert num_updates == 1, "unexpected number of version number updates"
fin.close()
fout.close()
if VERBOSE:
print("Updated '%s'" % ml_meta_out)
def mk_makefile(self, out):
if is_ml_enabled():
CP_CMD = "cp"
if IS_WINDOWS:
CP_CMD = "copy"
src_dir = self.to_src_dir
sub_dir = os.path.join('api', 'ml')
mk_dir(os.path.join(BUILD_DIR, sub_dir))
api_src = get_component(API_COMPONENT).to_src_dir
out.write('CXXFLAGS_OCAML=$(CXXFLAGS:/GL=)\n') # remove /GL; the ocaml tools don't like it.
for f in filter(lambda f: f.endswith('.ml'), os.listdir(self.src_dir)):
out.write('%s: %s\n' % (os.path.join(sub_dir,f),os.path.join(src_dir,f)))
                cmd = '\t%s %s %s\n' % (CP_CMD, os.path.join(src_dir, f), os.path.join(sub_dir, f))
                out.write(cmd)
for f in filter(lambda f: f.endswith('.mli'), os.listdir(self.src_dir)):
out.write('%s: %s\n' % (os.path.join(sub_dir,f),os.path.join(src_dir,f)))
                cmd = '\t%s %s %s\n' % (CP_CMD, os.path.join(src_dir, f), os.path.join(sub_dir, f))
                out.write(cmd)
for f in filter(lambda f: f.endswith('.c'), os.listdir(self.src_dir)):
out.write('%s: %s\n' % (os.path.join(sub_dir,f),os.path.join(src_dir,f)))
                cmd = '\t%s %s %s\n' % (CP_CMD, os.path.join(src_dir, f), os.path.join(sub_dir, f))
                out.write(cmd)
modules = ["z3enums", "z3native", "z3"] # dependencies in this order!
mls = ''
mlis = ''
cmis = ''
archives = ''
for m in modules:
fn = os.path.join(self.src_dir, ('%s.mli' % m))
if not os.path.exists(fn):
out.write('%s.mli: %s.ml%s\n' % (os.path.join(sub_dir,m),os.path.join(sub_dir,m),mlis))
out.write('\t%s -I %s -i -c %s.ml > %s.mli\n' % (OCAMLC,sub_dir,os.path.join(sub_dir, m),os.path.join(sub_dir, m)))
out.write('%s.cmi: %s.mli%s\n' % (os.path.join(sub_dir,m),os.path.join(sub_dir,m), cmis))
out.write('\t%s -I %s -c %s.mli\n' % (OCAMLC,sub_dir,os.path.join(sub_dir,m)))
out.write('%s.cma: %s.ml %s.cmi%s\n' % (os.path.join(sub_dir,m),os.path.join(sub_dir,m),os.path.join(sub_dir,m), archives))
                out.write('\t%s -a %s.ml -o %s.cma\n' % (OCAMLC, os.path.join(sub_dir, m), os.path.join(sub_dir, m)))
mlis = mlis + ' ' + os.path.join(sub_dir, m) + '.mli'
cmis = cmis + ' ' + os.path.join(sub_dir,m) + '.cmi'
archives = archives + ' ' + os.path.join(sub_dir,m) + '.cma'
mls = mls + ' ' + os.path.join(sub_dir, m) + '.ml'
out.write('%s: %s %s\n' %
(os.path.join(sub_dir, 'z3native_stubs$(OBJ_EXT)'),
os.path.join(sub_dir, 'z3native_stubs.c'),
get_component(Z3_DLL_COMPONENT).dll_name+'$(SO_EXT)'));
out.write('\t$(CC) $(CXXFLAGS_OCAML) -I %s -I %s %s $(CXX_OUT_FLAG)%s$(OBJ_EXT)\n' %
(OCAML_LIB, api_src, os.path.join(sub_dir, 'z3native_stubs.c'), os.path.join(sub_dir, 'z3native_stubs')))
out.write('%s: %s %s %s$(SO_EXT)' % (
os.path.join(sub_dir, "z3ml.cmxa"),
cmis,
archives,
get_component(Z3_DLL_COMPONENT).dll_name))
out.write(' %s\n' % (os.path.join(sub_dir, 'z3native_stubs$(OBJ_EXT)')))
out.write('\tocamlmklib -o %s -I %s -ldopt \"-L. -lz3\" ' % (os.path.join(sub_dir, 'z3ml'), sub_dir))
for m in modules:
out.write(' %s' % (os.path.join(sub_dir, m+'.ml')))
out.write(' %s\n' % (os.path.join(sub_dir, 'z3native_stubs$(OBJ_EXT)')))
out.write('ml: %s\n' % (os.path.join(sub_dir, 'z3ml.cmxa')))
            self.mk_ml_meta(os.path.join('src', 'api', 'ml', 'META'), os.path.join(BUILD_DIR, sub_dir, 'META'), VER_MAJOR, VER_MINOR, VER_BUILD, VER_REVISION)
if OCAMLFIND != '':
out.write('\nocamlfind_install: %s %s %s\n' % (
get_component(Z3_DLL_COMPONENT).dll_name + '$(SO_EXT)',
os.path.join(sub_dir, 'z3ml.cmxa'),
os.path.join(sub_dir, 'META')))
out.write('\t%s remove Z3\n' % (OCAMLFIND))
out.write('\t%s install Z3 %s' % (OCAMLFIND, (os.path.join(sub_dir, 'META'))))
for m in modules:
out.write(' %s.cma' % (os.path.join(sub_dir, m)))
out.write(' %s.cmx' % (os.path.join(sub_dir, m)))
out.write(' %s.cmi' % (os.path.join(sub_dir, m)))
out.write(' %s.cmo' % (os.path.join(sub_dir, m)))
out.write(' %s.ml' % (os.path.join(sub_dir, m)))
out.write(' %s.mli' % (os.path.join(sub_dir, m)))
out.write(' %s$(OBJ_EXT)' % (os.path.join(sub_dir, m)))
out.write(' %s' % ((os.path.join(sub_dir, 'z3ml$(LIB_EXT)'))))
out.write(' %s' % ((os.path.join(sub_dir, 'z3ml.cma'))))
out.write(' %s' % ((os.path.join(sub_dir, 'z3ml.cmxa'))))
out.write(' %s' % ((os.path.join(sub_dir, 'libz3ml$(LIB_EXT)'))))
out.write(' %s' % ((os.path.join(sub_dir, 'dllz3ml'))))
if IS_WINDOWS:
out.write('.dll')
else:
out.write('.so') # .so also on OSX!
out.write(' ' + get_component(Z3_DLL_COMPONENT).dll_name + '$(SO_EXT)')
if IS_WINDOWS:
out.write(' ' + get_component(Z3_DLL_COMPONENT).dll_name + '$(LIB_EXT)')
out.write('\n\n')
def main_component(self):
return is_ml_enabled()
class ExampleComponent(Component):
def __init__(self, name, path):
Component.__init__(self, name, path, [])
self.ex_dir = os.path.join(EXAMPLE_DIR, self.path)
self.to_ex_dir = os.path.join(REV_BUILD_DIR, self.ex_dir)
def is_example(self):
return True
class CppExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def compiler(self):
return "$(CXX)"
def src_files(self):
return get_cpp_files(self.ex_dir)
def mk_makefile(self, out):
dll_name = get_component(Z3_DLL_COMPONENT).dll_name
dll = '%s$(SO_EXT)' % dll_name
exefile = '%s$(EXE_EXT)' % self.name
out.write('%s: %s' % (exefile, dll))
for cppfile in self.src_files():
out.write(' ')
out.write(os.path.join(self.to_ex_dir, cppfile))
out.write('\n')
out.write('\t%s $(OS_DEFINES) $(EXAMP_DEBUG_FLAG) $(LINK_OUT_FLAG)%s $(LINK_FLAGS)' % (self.compiler(), exefile))
# Add include dir components
out.write(' -I%s' % get_component(API_COMPONENT).to_src_dir)
out.write(' -I%s' % get_component(CPP_COMPONENT).to_src_dir)
for cppfile in self.src_files():
out.write(' ')
out.write(os.path.join(self.to_ex_dir, cppfile))
out.write(' ')
if IS_WINDOWS:
out.write('%s.lib' % dll_name)
else:
out.write(dll)
out.write(' $(LINK_EXTRA_FLAGS)\n')
out.write('_ex_%s: %s\n\n' % (self.name, exefile))
class CExampleComponent(CppExampleComponent):
def __init__(self, name, path):
CppExampleComponent.__init__(self, name, path)
def compiler(self):
return "$(CC)"
def src_files(self):
return get_c_files(self.ex_dir)
class DotNetExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return IS_WINDOWS
def mk_makefile(self, out):
if DOTNET_ENABLED:
dll_name = get_component(DOTNET_COMPONENT).dll_name
dll = '%s.dll' % dll_name
exefile = '%s$(EXE_EXT)' % self.name
out.write('%s: %s' % (exefile, dll))
for csfile in get_cs_files(self.ex_dir):
out.write(' ')
out.write(os.path.join(self.to_ex_dir, csfile))
out.write('\n')
out.write('\tcsc /out:%s /reference:%s /debug:full /reference:System.Numerics.dll' % (exefile, dll))
if VS_X64:
out.write(' /platform:x64')
else:
out.write(' /platform:x86')
for csfile in get_cs_files(self.ex_dir):
out.write(' ')
# HACK
win_ex_dir = self.to_ex_dir.replace('/', '\\')
out.write(os.path.join(win_ex_dir, csfile))
out.write('\n')
out.write('_ex_%s: %s\n\n' % (self.name, exefile))
class JavaExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return JAVA_ENABLED
def mk_makefile(self, out):
if JAVA_ENABLED:
pkg = get_component(JAVA_COMPONENT).package_name + '.jar'
            out.write('JavaExample.class: %s' % (pkg))
            for jfile in get_java_files(self.ex_dir):
                out.write(' %s' % os.path.join(self.to_ex_dir, jfile))
            out.write('\n')
out.write('\t%s -cp %s ' % (JAVAC, pkg))
win_ex_dir = self.to_ex_dir
for javafile in get_java_files(self.ex_dir):
out.write(' ')
out.write(os.path.join(win_ex_dir, javafile))
out.write(' -d .\n')
out.write('_ex_%s: JavaExample.class\n\n' % (self.name))
class MLExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
def is_example(self):
return ML_ENABLED
def mk_makefile(self, out):
if ML_ENABLED:
out.write('ml_example.byte: api/ml/z3ml.cmxa ')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s' % os.path.join(self.to_ex_dir, mlfile))
out.write('\n')
out.write('\t%s ' % OCAMLC)
if DEBUG_MODE:
out.write('-g ')
out.write('-custom -o ml_example.byte -I api/ml -cclib "-L. -lz3" nums.cma z3ml.cma')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s/%s' % (self.to_ex_dir, mlfile))
out.write('\n')
out.write('ml_example$(EXE_EXT): api/ml/z3ml.cmxa ml_example.byte')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s' % os.path.join(self.to_ex_dir, mlfile))
out.write('\n')
out.write('\t%s ' % OCAMLOPT)
if DEBUG_MODE:
out.write('-g ')
out.write('-o ml_example$(EXE_EXT) -I api/ml -cclib "-L. -lz3" nums.cmxa z3ml.cmxa')
for mlfile in get_ml_files(self.ex_dir):
out.write(' %s/%s' % (self.to_ex_dir, mlfile))
out.write('\n')
out.write('_ex_%s: ml_example.byte ml_example$(EXE_EXT)\n\n' % self.name)
class PythonExampleComponent(ExampleComponent):
def __init__(self, name, path):
ExampleComponent.__init__(self, name, path)
    # Python examples are just placeholders; we simply copy the *.py files when mk_makefile is invoked.
    # We don't need to include them in the :examples rule.
def mk_makefile(self, out):
full = os.path.join(EXAMPLE_DIR, self.path)
for py in filter(lambda f: f.endswith('.py'), os.listdir(full)):
shutil.copyfile(os.path.join(full, py), os.path.join(BUILD_DIR, py))
if is_verbose():
print("Copied Z3Py example '%s' to '%s'" % (py, BUILD_DIR))
out.write('_ex_%s: \n\n' % self.name)
def reg_component(name, c):
global _Id, _Components, _ComponentNames, _Name2Component
c.id = _Id
_Id = _Id + 1
_Components.append(c)
_ComponentNames.add(name)
_Name2Component[name] = c
if VERBOSE:
print("New component: '%s'" % name)
def add_lib(name, deps=[], path=None, includes2install=[]):
c = LibComponent(name, path, deps, includes2install)
reg_component(name, c)
def add_hlib(name, path=None, includes2install=[]):
c = HLibComponent(name, path, includes2install)
reg_component(name, c)
def add_exe(name, deps=[], path=None, exe_name=None, install=True):
c = ExeComponent(name, exe_name, path, deps, install)
reg_component(name, c)
def add_extra_exe(name, deps=[], path=None, exe_name=None, install=True):
c = ExtraExeComponent(name, exe_name, path, deps, install)
reg_component(name, c)
def add_dll(name, deps=[], path=None, dll_name=None, export_files=[], reexports=[], install=True, static=False):
c = DLLComponent(name, dll_name, path, deps, export_files, reexports, install, static)
reg_component(name, c)
def add_dot_net_dll(name, deps=[], path=None, dll_name=None, assembly_info_dir=None):
c = DotNetDLLComponent(name, dll_name, path, deps, assembly_info_dir)
reg_component(name, c)
def add_java_dll(name, deps=[], path=None, dll_name=None, package_name=None, manifest_file=None):
c = JavaDLLComponent(name, dll_name, package_name, manifest_file, path, deps)
reg_component(name, c)
def add_ml_lib(name, deps=[], path=None, lib_name=None):
c = MLComponent(name, lib_name, path, deps)
reg_component(name, c)
def add_cpp_example(name, path=None):
c = CppExampleComponent(name, path)
reg_component(name, c)
def add_c_example(name, path=None):
c = CExampleComponent(name, path)
reg_component(name, c)
def add_dotnet_example(name, path=None):
c = DotNetExampleComponent(name, path)
reg_component(name, c)
def add_java_example(name, path=None):
c = JavaExampleComponent(name, path)
reg_component(name, c)
def add_ml_example(name, path=None):
c = MLExampleComponent(name, path)
reg_component(name, c)
def add_z3py_example(name, path=None):
c = PythonExampleComponent(name, path)
reg_component(name, c)
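# The add_* helpers above are meant to be called from a project description
# script (e.g. mk_project.py in the Z3 sources). Illustrative usage:
#   add_lib('util', [])
#   add_lib('ast', ['util'])
#   add_dll('api_dll', ['api'], 'api/dll', dll_name='libz3', export_files=['api_dll.cpp'])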
def mk_config():
if ONLY_MAKEFILES:
return
config = open(os.path.join(BUILD_DIR, 'config.mk'), 'w')
if IS_WINDOWS:
config.write(
'CC=cl\n'
'CXX=cl\n'
'CXX_OUT_FLAG=/Fo\n'
'OBJ_EXT=.obj\n'
'LIB_EXT=.lib\n'
'AR=lib\n'
'AR_FLAGS=/nologo /LTCG\n'
'AR_OUTFLAG=/OUT:\n'
'EXE_EXT=.exe\n'
'LINK=cl\n'
'LINK_OUT_FLAG=/Fe\n'
'SO_EXT=.dll\n'
'SLINK=cl\n'
'SLINK_OUT_FLAG=/Fe\n'
'OS_DEFINES=/D _WINDOWS\n')
extra_opt = ''
HAS_OMP = test_openmp('cl')
if HAS_OMP:
extra_opt = ' /openmp'
else:
extra_opt = ' -D_NO_OMP_'
if GIT_HASH:
extra_opt = ' %s /D Z3GITHASH=%s' % (extra_opt, GIT_HASH)
if DEBUG_MODE:
config.write(
'LINK_FLAGS=/nologo /MDd\n'
'SLINK_FLAGS=/nologo /LDd\n')
if not VS_X64:
config.write(
'CXXFLAGS=/c /GL /Zi /nologo /W3 /WX- /Od /Oy- /D WIN32 /D _DEBUG /D Z3DEBUG %s /D _CONSOLE /D _TRACE /D _WINDOWS /Gm- /EHsc /RTC1 /MDd /GS /fp:precise /Zc:wchar_t /Zc:forScope /Gd /analyze- /arch:SSE2\n' % extra_opt)
config.write(
'LINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X86 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT\n'
'SLINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X86 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE:NO\n')
else:
config.write(
'CXXFLAGS=/c /GL /Zi /nologo /W3 /WX- /Od /Oy- /D WIN32 /D _AMD64_ /D _DEBUG /D Z3DEBUG %s /D _CONSOLE /D _TRACE /D _WINDOWS /Gm- /EHsc /RTC1 /MDd /GS /fp:precise /Zc:wchar_t /Zc:forScope /Gd /analyze-\n' % extra_opt)
config.write(
'LINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X64 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT\n'
'SLINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X64 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE:NO\n')
else:
# Windows Release mode
config.write(
'LINK_FLAGS=/nologo /MD\n'
'SLINK_FLAGS=/nologo /LD\n')
if TRACE:
extra_opt = '%s /D _TRACE ' % extra_opt
if not VS_X64:
config.write(
'CXXFLAGS=/nologo /c /GL /Zi /W3 /WX- /O2 /Oy- /D _EXTERNAL_RELEASE /D WIN32 /D NDEBUG %s /D _CONSOLE /D _WINDOWS /D ASYNC_COMMANDS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Gd /analyze- /arch:SSE2\n' % extra_opt)
config.write(
'LINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X86 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE /NXCOMPAT\n'
'SLINK_EXTRA_FLAGS=/link /LTCG /DEBUG /MACHINE:X86 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608 /OPT:REF /OPT:ICF /TLBID:1 /DYNAMICBASE:NO\n')
else:
config.write(
'CXXFLAGS=/c /GL /Zi /nologo /W3 /WX- /O2 /D _EXTERNAL_RELEASE /D WIN32 /D NDEBUG %s /D _LIB /D _WINDOWS /D _AMD64_ /D _UNICODE /D UNICODE /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Gd /TP\n' % extra_opt)
config.write(
'LINK_EXTRA_FLAGS=/link /LTCG /MACHINE:X64 /SUBSYSTEM:CONSOLE /INCREMENTAL:NO /STACK:8388608\n'
'SLINK_EXTRA_FLAGS=/link /LTCG /MACHINE:X64 /SUBSYSTEM:WINDOWS /INCREMENTAL:NO /STACK:8388608\n')
# End of Windows VS config.mk
if is_verbose():
print('64-bit: %s' % is64())
print('OpenMP: %s' % HAS_OMP)
if is_java_enabled():
print('JNI Bindings: %s' % JNI_HOME)
print('Java Compiler: %s' % JAVAC)
if is_ml_enabled():
print('OCaml Compiler: %s' % OCAMLC)
print('OCaml Native: %s' % OCAMLOPT)
print('OCaml Library: %s' % OCAML_LIB)
else:
global CXX, CC, GMP, FOCI2, CPPFLAGS, CXXFLAGS, LDFLAGS, EXAMP_DEBUG_FLAG
OS_DEFINES = ""
ARITH = "internal"
check_ar()
CXX = find_cxx_compiler()
CC = find_c_compiler()
SLIBEXTRAFLAGS = ''
if GPROF:
CXXFLAGS = '%s -pg' % CXXFLAGS
LDFLAGS = '%s -pg' % LDFLAGS
if GMP:
test_gmp(CXX)
ARITH = "gmp"
CPPFLAGS = '%s -D_MP_GMP' % CPPFLAGS
LDFLAGS = '%s -lgmp' % LDFLAGS
SLIBEXTRAFLAGS = '%s -lgmp' % SLIBEXTRAFLAGS
else:
CPPFLAGS = '%s -D_MP_INTERNAL' % CPPFLAGS
if FOCI2:
if test_foci2(CXX,FOCI2LIB):
LDFLAGS = '%s %s' % (LDFLAGS,FOCI2LIB)
SLIBEXTRAFLAGS = '%s %s' % (SLIBEXTRAFLAGS,FOCI2LIB)
CPPFLAGS = '%s -D_FOCI2' % CPPFLAGS
else:
print("FAILED\n")
FOCI2 = False
if GIT_HASH:
CPPFLAGS = '%s -DZ3GITHASH=%s' % (CPPFLAGS, GIT_HASH)
CXXFLAGS = '%s -fvisibility=hidden -c' % CXXFLAGS
HAS_OMP = test_openmp(CXX)
if HAS_OMP:
CXXFLAGS = '%s -fopenmp -mfpmath=sse' % CXXFLAGS
LDFLAGS = '%s -fopenmp' % LDFLAGS
SLIBEXTRAFLAGS = '%s -fopenmp' % SLIBEXTRAFLAGS
else:
CXXFLAGS = '%s -D_NO_OMP_' % CXXFLAGS
if DEBUG_MODE:
CXXFLAGS = '%s -g -Wall' % CXXFLAGS
EXAMP_DEBUG_FLAG = '-g'
else:
if GPROF:
CXXFLAGS = '%s -O3 -D _EXTERNAL_RELEASE' % CXXFLAGS
else:
CXXFLAGS = '%s -O3 -D _EXTERNAL_RELEASE -fomit-frame-pointer' % CXXFLAGS
if is_CXX_clangpp():
CXXFLAGS = '%s -Wno-unknown-pragmas -Wno-overloaded-virtual -Wno-unused-value' % CXXFLAGS
sysname = os.uname()[0]
if sysname == 'Darwin':
SO_EXT = '.dylib'
SLIBFLAGS = '-dynamiclib'
elif sysname == 'Linux':
CXXFLAGS = '%s -fno-strict-aliasing -D_LINUX_' % CXXFLAGS
OS_DEFINES = '-D_LINUX'
SO_EXT = '.so'
LDFLAGS = '%s -lrt' % LDFLAGS
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -lrt' % SLIBEXTRAFLAGS
elif sysname == 'FreeBSD':
CXXFLAGS = '%s -fno-strict-aliasing -D_FREEBSD_' % CXXFLAGS
OS_DEFINES = '-D_FREEBSD_'
SO_EXT = '.so'
LDFLAGS = '%s -lrt' % LDFLAGS
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -lrt' % SLIBEXTRAFLAGS
elif sysname[:6] == 'CYGWIN':
CXXFLAGS = '%s -D_CYGWIN -fno-strict-aliasing' % CXXFLAGS
OS_DEFINES = '-D_CYGWIN'
SO_EXT = '.dll'
SLIBFLAGS = '-shared'
else:
raise MKException('Unsupported platform: %s' % sysname)
if is64():
CXXFLAGS = '%s -fPIC' % CXXFLAGS
CPPFLAGS = '%s -D_AMD64_' % CPPFLAGS
if sysname == 'Linux':
CPPFLAGS = '%s -D_USE_THREAD_LOCAL' % CPPFLAGS
elif not LINUX_X64:
CXXFLAGS = '%s -m32' % CXXFLAGS
LDFLAGS = '%s -m32' % LDFLAGS
SLIBFLAGS = '%s -m32' % SLIBFLAGS
if DEBUG_MODE:
CPPFLAGS = '%s -DZ3DEBUG' % CPPFLAGS
if TRACE or DEBUG_MODE:
CPPFLAGS = '%s -D_TRACE' % CPPFLAGS
CXXFLAGS = '%s -msse -msse2' % CXXFLAGS
config.write('PREFIX=%s\n' % PREFIX)
config.write('CC=%s\n' % CC)
config.write('CXX=%s\n' % CXX)
config.write('CXXFLAGS=%s %s\n' % (CPPFLAGS, CXXFLAGS))
config.write('EXAMP_DEBUG_FLAG=%s\n' % EXAMP_DEBUG_FLAG)
config.write('CXX_OUT_FLAG=-o \n')
config.write('OBJ_EXT=.o\n')
config.write('LIB_EXT=.a\n')
config.write('AR=ar\n')
config.write('AR_FLAGS=rcs\n')
config.write('AR_OUTFLAG=\n')
config.write('EXE_EXT=\n')
config.write('LINK=%s\n' % CXX)
config.write('LINK_FLAGS=\n')
config.write('LINK_OUT_FLAG=-o \n')
config.write('LINK_EXTRA_FLAGS=-lpthread %s\n' % LDFLAGS)
config.write('SO_EXT=%s\n' % SO_EXT)
config.write('SLINK=%s\n' % CXX)
config.write('SLINK_FLAGS=%s\n' % SLIBFLAGS)
config.write('SLINK_EXTRA_FLAGS=%s\n' % SLIBEXTRAFLAGS)
config.write('SLINK_OUT_FLAG=-o \n')
config.write('OS_DEFINES=%s\n' % OS_DEFINES)
if is_verbose():
print('Host platform: %s' % sysname)
print('C++ Compiler: %s' % CXX)
print('C Compiler : %s' % CC)
print('Arithmetic: %s' % ARITH)
print('OpenMP: %s' % HAS_OMP)
print('Prefix: %s' % PREFIX)
print('64-bit: %s' % is64())
if GPROF:
print('gprof: enabled')
print('Python version: %s' % distutils.sysconfig.get_python_version())
if is_java_enabled():
print('JNI Bindings: %s' % JNI_HOME)
print('Java Compiler: %s' % JAVAC)
if is_ml_enabled():
print('OCaml Compiler: %s' % OCAMLC)
print('OCaml Native: %s' % OCAMLOPT)
print('OCaml Library: %s' % OCAML_LIB)
def mk_install(out):
out.write('install: ')
for c in get_components():
c.mk_install_deps(out)
out.write(' ')
if is_ml_enabled() and OCAMLFIND != '':
out.write('ocamlfind_install')
out.write('\n')
out.write('\t@mkdir -p %s\n' % os.path.join('$(PREFIX)', 'bin'))
out.write('\t@mkdir -p %s\n' % os.path.join('$(PREFIX)', 'include'))
out.write('\t@mkdir -p %s\n' % os.path.join('$(PREFIX)', 'lib'))
for c in get_components():
c.mk_install(out)
out.write('\t@cp z3*.py %s\n' % PYTHON_PACKAGE_DIR)
if sys.version >= "3":
out.write('\t@cp %s*.pyc %s\n' % (os.path.join('__pycache__', 'z3'),
os.path.join(PYTHON_PACKAGE_DIR, '__pycache__')))
else:
out.write('\t@cp z3*.pyc %s\n' % PYTHON_PACKAGE_DIR)
out.write('\t@echo Z3 was successfully installed.\n')
if PYTHON_PACKAGE_DIR != distutils.sysconfig.get_python_lib():
if os.uname()[0] == 'Darwin':
LD_LIBRARY_PATH = "DYLD_LIBRARY_PATH"
else:
LD_LIBRARY_PATH = "LD_LIBRARY_PATH"
out.write('\t@echo Z3 shared libraries were installed at \'%s\', make sure this directory is in your %s environment variable.\n' %
(os.path.join(PREFIX, 'lib'), LD_LIBRARY_PATH))
out.write('\t@echo Z3Py was installed at \'%s\', make sure this directory is in your PYTHONPATH environment variable.' % PYTHON_PACKAGE_DIR)
out.write('\n')
def mk_uninstall(out):
out.write('uninstall:\n')
for c in get_components():
c.mk_uninstall(out)
out.write('\t@rm -f %s*.py\n' % os.path.join(PYTHON_PACKAGE_DIR, 'z3'))
out.write('\t@rm -f %s*.pyc\n' % os.path.join(PYTHON_PACKAGE_DIR, 'z3'))
out.write('\t@rm -f %s*.pyc\n' % os.path.join(PYTHON_PACKAGE_DIR, '__pycache__', 'z3'))
out.write('\t@echo Z3 was successfully uninstalled.\n')
out.write('\n')
# Generate the Z3 makefile
def mk_makefile():
mk_dir(BUILD_DIR)
mk_config()
if VERBOSE:
print("Writing %s" % os.path.join(BUILD_DIR, 'Makefile'))
out = open(os.path.join(BUILD_DIR, 'Makefile'), 'w')
out.write('# Automatically generated file.\n')
out.write('include config.mk\n')
# Generate :all rule
out.write('all:')
for c in get_components():
if c.main_component():
out.write(' %s' % c.name)
out.write('\n\t@echo Z3 was successfully built.\n')
out.write("\t@echo \"Z3Py scripts can already be executed in the \'%s\' directory.\"\n" % BUILD_DIR)
out.write("\t@echo \"Z3Py scripts stored in arbitrary directories can be also executed if \'%s\' directory is added to the PYTHONPATH environment variable.\"\n" % BUILD_DIR)
if not IS_WINDOWS:
out.write("\t@echo Use the following command to install Z3 at prefix $(PREFIX).\n")
out.write('\t@echo " sudo make install"\n')
# Generate :examples rule
out.write('examples:')
for c in get_components():
if c.is_example():
out.write(' _ex_%s' % c.name)
out.write('\n\t@echo Z3 examples were successfully built.\n')
# Generate components
for c in get_components():
c.mk_makefile(out)
# Generate install/uninstall rules if not WINDOWS
if not IS_WINDOWS:
mk_install(out)
mk_uninstall(out)
# Finalize
if VERBOSE:
print("Makefile was successfully generated.")
if not IS_WINDOWS:
print(" python packages dir: %s" % PYTHON_PACKAGE_DIR)
if DEBUG_MODE:
print(" compilation mode: Debug")
else:
print(" compilation mode: Release")
if IS_WINDOWS:
if VS_X64:
print(" platform: x64\n")
print("To build Z3, open a [Visual Studio x64 Command Prompt], then")
else:
print(" platform: x86")
print("To build Z3, open a [Visual Studio Command Prompt], then")
print("type 'cd %s && nmake'\n" % os.path.join(os.getcwd(), BUILD_DIR))
print('Remark: to open a Visual Studio Command Prompt, go to: "Start > All Programs > Visual Studio > Visual Studio Tools"')
else:
print("Type 'cd %s; make' to build Z3" % BUILD_DIR)
# Generate automatically generated source code
def mk_auto_src():
if not ONLY_MAKEFILES:
exec_pyg_scripts()
mk_pat_db()
mk_all_install_tactic_cpps()
mk_all_mem_initializer_cpps()
mk_all_gparams_register_modules()
UINT = 0
BOOL = 1
DOUBLE = 2
STRING = 3
SYMBOL = 4
UINT_MAX = 4294967295
CURR_PYG = None
def get_curr_pyg():
return CURR_PYG
TYPE2CPK = { UINT : 'CPK_UINT', BOOL : 'CPK_BOOL', DOUBLE : 'CPK_DOUBLE', STRING : 'CPK_STRING', SYMBOL : 'CPK_SYMBOL' }
TYPE2CTYPE = { UINT : 'unsigned', BOOL : 'bool', DOUBLE : 'double', STRING : 'char const *', SYMBOL : 'symbol' }
TYPE2GETTER = { UINT : 'get_uint', BOOL : 'get_bool', DOUBLE : 'get_double', STRING : 'get_str', SYMBOL : 'get_sym' }
def pyg_default(p):
if p[1] == BOOL:
if p[2]:
return "true"
else:
return "false"
return p[2]
def pyg_default_as_c_literal(p):
if p[1] == BOOL:
if p[2]:
return "true"
else:
return "false"
elif p[1] == STRING:
return '"%s"' % p[2]
elif p[1] == SYMBOL:
return 'symbol("%s")' % p[2]
elif p[1] == UINT:
return '%su' % p[2]
else:
return p[2]
def to_c_method(s):
return s.replace('.', '_')
def def_module_params(module_name, export, params, class_name=None, description=None):
pyg = get_curr_pyg()
dirname = os.path.split(get_curr_pyg())[0]
if class_name == None:
class_name = '%s_params' % module_name
hpp = os.path.join(dirname, '%s.hpp' % class_name)
out = open(hpp, 'w')
out.write('// Automatically generated file\n')
out.write('#ifndef __%s_HPP_\n' % class_name.upper())
out.write('#define __%s_HPP_\n' % class_name.upper())
out.write('#include"params.h"\n')
if export:
out.write('#include"gparams.h"\n')
out.write('struct %s {\n' % class_name)
out.write(' params_ref const & p;\n')
if export:
out.write(' params_ref g;\n')
out.write(' %s(params_ref const & _p = params_ref::get_empty()):\n' % class_name)
out.write(' p(_p)')
if export:
out.write(', g(gparams::get_module("%s"))' % module_name)
out.write(' {}\n')
out.write(' static void collect_param_descrs(param_descrs & d) {\n')
for param in params:
out.write(' d.insert("%s", %s, "%s", "%s","%s");\n' % (param[0], TYPE2CPK[param[1]], param[3], pyg_default(param), module_name))
out.write(' }\n')
if export:
out.write(' /*\n')
out.write(" REG_MODULE_PARAMS('%s', '%s::collect_param_descrs')\n" % (module_name, class_name))
if description != None:
out.write(" REG_MODULE_DESCRIPTION('%s', '%s')\n" % (module_name, description))
out.write(' */\n')
# Generated accessors
for param in params:
if export:
out.write(' %s %s() const { return p.%s("%s", g, %s); }\n' %
(TYPE2CTYPE[param[1]], to_c_method(param[0]), TYPE2GETTER[param[1]], param[0], pyg_default_as_c_literal(param)))
else:
out.write(' %s %s() const { return p.%s("%s", %s); }\n' %
(TYPE2CTYPE[param[1]], to_c_method(param[0]), TYPE2GETTER[param[1]], param[0], pyg_default_as_c_literal(param)))
out.write('};\n')
out.write('#endif\n')
if is_verbose():
print("Generated '%s'" % hpp)
def max_memory_param():
return ('max_memory', UINT, UINT_MAX, 'maximum amount of memory in megabytes')
def max_steps_param():
return ('max_steps', UINT, UINT_MAX, 'maximum number of steps')
PYG_GLOBALS = { 'UINT' : UINT, 'BOOL' : BOOL, 'DOUBLE' : DOUBLE, 'STRING' : STRING, 'SYMBOL' : SYMBOL,
'UINT_MAX' : UINT_MAX,
'max_memory_param' : max_memory_param,
'max_steps_param' : max_steps_param,
'def_module_params' : def_module_params }
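# A .pyg file is a small Python script executed with the globals above. An
# illustrative parameter module description looks like:
#   def_module_params('mymodule',
#                     export=True,
#                     params=(max_memory_param(),
#                             ('enable_foo', BOOL, True, 'enable the foo rewrite')))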
def _execfile(file, globals=globals(), locals=locals()):
if sys.version < "2.7":
execfile(file, globals, locals)
else:
with open(file, "r") as fh:
exec(fh.read()+"\n", globals, locals)
# Execute python auxiliary scripts that generate extra code for Z3.
def exec_pyg_scripts():
global CURR_PYG
for root, dirs, files in os.walk('src'):
for f in files:
if f.endswith('.pyg'):
script = os.path.join(root, f)
CURR_PYG = script
_execfile(script, PYG_GLOBALS)
# TODO: delete after src/ast/pattern/expr_pattern_match
# database.smt2 ==> database.h
def mk_pat_db():
c = get_component(PATTERN_COMPONENT)
fin = open(os.path.join(c.src_dir, 'database.smt2'), 'r')
fout = open(os.path.join(c.src_dir, 'database.h'), 'w')
fout.write('static char const g_pattern_database[] =\n')
for line in fin:
fout.write('"%s\\n"\n' % line.strip('\n'))
fout.write(';\n')
if VERBOSE:
print("Generated '%s'" % os.path.join(c.src_dir, 'database.h'))
# Update version numbers
def update_version():
major = VER_MAJOR
minor = VER_MINOR
build = VER_BUILD
revision = VER_REVISION
if major == None or minor == None or build == None or revision == None:
raise MKException("set_version(major, minor, build, revision) must be used before invoking update_version()")
if not ONLY_MAKEFILES:
mk_version_dot_h(major, minor, build, revision)
mk_all_assembly_infos(major, minor, build, revision)
mk_def_files()
# Update files with the version number
def mk_version_dot_h(major, minor, build, revision):
c = get_component(UTIL_COMPONENT)
fout = open(os.path.join(c.src_dir, 'version.h'), 'w')
fout.write('// automatically generated file.\n')
fout.write('#define Z3_MAJOR_VERSION %s\n' % major)
fout.write('#define Z3_MINOR_VERSION %s\n' % minor)
fout.write('#define Z3_BUILD_NUMBER %s\n' % build)
fout.write('#define Z3_REVISION_NUMBER %s\n' % revision)
if VERBOSE:
print("Generated '%s'" % os.path.join(c.src_dir, 'version.h'))
# Generate AssemblyInfo.cs files with the right version numbers by using AssemblyInfo files as a template
def mk_all_assembly_infos(major, minor, build, revision):
for c in get_components():
if c.has_assembly_info():
assembly = os.path.join(c.src_dir, c.assembly_info_dir, 'AssemblyInfo')
if os.path.exists(assembly):
# It is a CS file
mk_assembly_info_version(assembly, major, minor, build, revision)
else:
raise MKException("Failed to find assembly info file 'AssemblyInfo' at '%s'" % os.path.join(c.src_dir, c.assembly_info_dir))
# Generate version number in the given 'AssemblyInfo.cs' file using 'AssemblyInfo' as a template.
def mk_assembly_info_version(assemblyinfo, major, minor, build, revision):
    ver_pat = re.compile(r'\[assembly: AssemblyVersion\("[\.\d]*"\) *')
    fver_pat = re.compile(r'\[assembly: AssemblyFileVersion\("[\.\d]*"\) *')
fin = open(assemblyinfo, 'r')
tmp = '%s.cs' % assemblyinfo
fout = open(tmp, 'w')
num_updates = 0
for line in fin:
if ver_pat.match(line):
fout.write('[assembly: AssemblyVersion("%s.%s.%s.%s")]\n' % (major, minor, build, revision))
num_updates = num_updates + 1
elif fver_pat.match(line):
fout.write('[assembly: AssemblyFileVersion("%s.%s.%s.%s")]\n' % (major, minor, build, revision))
num_updates = num_updates + 1
else:
fout.write(line)
# if VERBOSE:
# print("%s version numbers updated at '%s'" % (num_updates, assemblyinfo))
assert num_updates == 2, "unexpected number of version number updates"
fin.close()
fout.close()
if VERBOSE:
print("Updated '%s'" % assemblyinfo)
ADD_TACTIC_DATA=[]
ADD_PROBE_DATA=[]
def ADD_TACTIC(name, descr, cmd):
global ADD_TACTIC_DATA
ADD_TACTIC_DATA.append((name, descr, cmd))
def ADD_PROBE(name, descr, cmd):
global ADD_PROBE_DATA
ADD_PROBE_DATA.append((name, descr, cmd))
# Generate an install_tactics.cpp at path.
# This file implements the procedure
# void install_tactics(tactic_manager & ctx)
# It installs all tactics in the given component (name) list cnames
# The procedure looks for ADD_TACTIC commands in the .h files of these components.
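# Headers declare tactics and probes with lines such as (illustrative):
#   ADD_TACTIC('simplify', 'apply simplification rules.', 'mk_simplify_tactic(m, p)')
#   ADD_PROBE('memory', 'amount of used memory in megabytes.', 'mk_memory_probe()')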
def mk_install_tactic_cpp(cnames, path):
global ADD_TACTIC_DATA, ADD_PROBE_DATA
ADD_TACTIC_DATA = []
ADD_PROBE_DATA = []
fullname = os.path.join(path, 'install_tactic.cpp')
fout = open(fullname, 'w')
fout.write('// Automatically generated file.\n')
fout.write('#include"tactic.h"\n')
fout.write('#include"tactic_cmds.h"\n')
fout.write('#include"cmd_context.h"\n')
tactic_pat = re.compile('[ \t]*ADD_TACTIC\(.*\)')
probe_pat = re.compile('[ \t]*ADD_PROBE\(.*\)')
for cname in cnames:
c = get_component(cname)
h_files = filter(lambda f: f.endswith('.h') or f.endswith('.hpp'), os.listdir(c.src_dir))
for h_file in h_files:
added_include = False
fin = open(os.path.join(c.src_dir, h_file), 'r')
for line in fin:
if tactic_pat.match(line):
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
try:
exec(line.strip('\n '), globals())
except:
raise MKException("Failed processing ADD_TACTIC command at '%s'\n%s" % (fullname, line))
if probe_pat.match(line):
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
try:
exec(line.strip('\n '), globals())
except:
raise MKException("Failed processing ADD_PROBE command at '%s'\n%s" % (fullname, line))
# First pass will just generate the tactic factories
idx = 0
for data in ADD_TACTIC_DATA:
fout.write('MK_SIMPLE_TACTIC_FACTORY(__Z3_local_factory_%s, %s);\n' % (idx, data[2]))
idx = idx + 1
fout.write('#define ADD_TACTIC_CMD(NAME, DESCR, FACTORY) ctx.insert(alloc(tactic_cmd, symbol(NAME), DESCR, alloc(FACTORY)))\n')
fout.write('#define ADD_PROBE(NAME, DESCR, PROBE) ctx.insert(alloc(probe_info, symbol(NAME), DESCR, PROBE))\n')
fout.write('void install_tactics(tactic_manager & ctx) {\n')
idx = 0
for data in ADD_TACTIC_DATA:
fout.write(' ADD_TACTIC_CMD("%s", "%s", __Z3_local_factory_%s);\n' % (data[0], data[1], idx))
idx = idx + 1
for data in ADD_PROBE_DATA:
fout.write(' ADD_PROBE("%s", "%s", %s);\n' % data)
fout.write('}\n')
if VERBOSE:
print("Generated '%s'" % fullname)
def mk_all_install_tactic_cpps():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_install_tactics():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_install_tactic_cpp(cnames, c.src_dir)
# Generate a mem_initializer.cpp at path.
# This file implements the procedures
# void mem_initialize()
# void mem_finalize()
# These procedures are invoked by the Z3 memory_manager
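# Illustrative sketch (hypothetical header content): the scanner below reacts to
# lines such as
#   ADD_INITIALIZER('initialize_my_component();')
#   ADD_INITIALIZER('initialize_my_tables();', -10)   # with an explicit priority
#   ADD_FINALIZER('finalize_my_component();')
# The quoted C++ statements are copied verbatim into mem_initialize() and
# mem_finalize(), with initializers ordered by the optional priority.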
def mk_mem_initializer_cpp(cnames, path):
initializer_cmds = []
finalizer_cmds = []
fullname = os.path.join(path, 'mem_initializer.cpp')
fout = open(fullname, 'w')
fout.write('// Automatically generated file.\n')
    initializer_pat = re.compile(r'[ \t]*ADD_INITIALIZER\(\'([^\']*)\'\)')
    # ADD_INITIALIZER with priority
    initializer_prio_pat = re.compile(r'[ \t]*ADD_INITIALIZER\(\'([^\']*)\',[ \t]*(-?[0-9]*)\)')
    finalizer_pat = re.compile(r'[ \t]*ADD_FINALIZER\(\'([^\']*)\'\)')
for cname in cnames:
c = get_component(cname)
h_files = filter(lambda f: f.endswith('.h') or f.endswith('.hpp'), os.listdir(c.src_dir))
for h_file in h_files:
added_include = False
fin = open(os.path.join(c.src_dir, h_file), 'r')
for line in fin:
m = initializer_pat.match(line)
if m:
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
initializer_cmds.append((m.group(1), 0))
m = initializer_prio_pat.match(line)
if m:
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
initializer_cmds.append((m.group(1), int(m.group(2))))
m = finalizer_pat.match(line)
if m:
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
finalizer_cmds.append(m.group(1))
initializer_cmds.sort(key=lambda tup: tup[1])
fout.write('void mem_initialize() {\n')
for (cmd, prio) in initializer_cmds:
fout.write(cmd)
fout.write('\n')
fout.write('}\n')
fout.write('void mem_finalize() {\n')
for cmd in finalizer_cmds:
fout.write(cmd)
fout.write('\n')
fout.write('}\n')
if VERBOSE:
print("Generated '%s'" % fullname)
def mk_all_mem_initializer_cpps():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_mem_initializer():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_mem_initializer_cpp(cnames, c.src_dir)
# Generate a gparams_register_modules.cpp at path.
# This file implements the procedure
# void gparams_register_modules()
# This procedure is invoked by gparams::init()
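# Illustrative sketch (hypothetical header content): the scanner below reacts to
# lines such as
#   REG_PARAMS('my_params::collect_param_descrs')
#   REG_MODULE_PARAMS('mymodule', 'my_module_params::collect_param_descrs')
#   REG_MODULE_DESCRIPTION('mymodule', 'description of mymodule')
# and the generated gparams_register_modules() calls each collector and registers
# the resulting param_descrs globally or under the named module.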
def mk_gparams_register_modules(cnames, path):
cmds = []
mod_cmds = []
mod_descrs = []
fullname = os.path.join(path, 'gparams_register_modules.cpp')
fout = open(fullname, 'w')
fout.write('// Automatically generated file.\n')
fout.write('#include"gparams.h"\n')
    reg_pat = re.compile(r'[ \t]*REG_PARAMS\(\'([^\']*)\'\)')
    reg_mod_pat = re.compile(r'[ \t]*REG_MODULE_PARAMS\(\'([^\']*)\', *\'([^\']*)\'\)')
    reg_mod_descr_pat = re.compile(r'[ \t]*REG_MODULE_DESCRIPTION\(\'([^\']*)\', *\'([^\']*)\'\)')
for cname in cnames:
c = get_component(cname)
h_files = filter(lambda f: f.endswith('.h') or f.endswith('.hpp'), os.listdir(c.src_dir))
for h_file in h_files:
added_include = False
fin = open(os.path.join(c.src_dir, h_file), 'r')
for line in fin:
m = reg_pat.match(line)
if m:
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
cmds.append((m.group(1)))
m = reg_mod_pat.match(line)
if m:
if not added_include:
added_include = True
fout.write('#include"%s"\n' % h_file)
mod_cmds.append((m.group(1), m.group(2)))
m = reg_mod_descr_pat.match(line)
if m:
mod_descrs.append((m.group(1), m.group(2)))
fout.write('void gparams_register_modules() {\n')
for code in cmds:
fout.write('{ param_descrs d; %s(d); gparams::register_global(d); }\n' % code)
for (mod, code) in mod_cmds:
fout.write('{ param_descrs * d = alloc(param_descrs); %s(*d); gparams::register_module("%s", d); }\n' % (code, mod))
for (mod, descr) in mod_descrs:
fout.write('gparams::register_module_descr("%s", "%s");\n' % (mod, descr))
fout.write('}\n')
if VERBOSE:
print("Generated '%s'" % fullname)
def mk_all_gparams_register_modules():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_mem_initializer():
cnames = []
cnames.extend(c.deps)
cnames.append(c.name)
mk_gparams_register_modules(cnames, c.src_dir)
# Generate a .def file from the header files listed in the component's export_files slot.
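# Illustrative sketch (Z3_mk_true is used only as an example signature): a header
# line such as
#   Z3_ast Z3_API Z3_mk_true(Z3_context c);
# makes the loop below write '\tZ3_mk_true @<ordinal>' into the .def file, i.e.
# every identifier that directly follows Z3_API is exported.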
def mk_def_file(c):
pat1 = re.compile(".*Z3_API.*")
defname = '%s.def' % os.path.join(c.src_dir, c.name)
fout = open(defname, 'w')
fout.write('LIBRARY "%s"\nEXPORTS\n' % c.dll_name)
num = 1
for dot_h in c.export_files:
dot_h_c = c.find_file(dot_h, c.name)
api = open(os.path.join(dot_h_c.src_dir, dot_h), 'r')
for line in api:
m = pat1.match(line)
if m:
words = re.split('\W+', line)
i = 0
for w in words:
if w == 'Z3_API':
f = words[i+1]
fout.write('\t%s @%s\n' % (f, num))
i = i + 1
num = num + 1
if VERBOSE:
print("Generated '%s'" % defname)
def mk_def_files():
if not ONLY_MAKEFILES:
for c in get_components():
if c.require_def_file():
mk_def_file(c)
def cp_z3py_to_build():
mk_dir(BUILD_DIR)
# Erase existing .pyc files
for root, dirs, files in os.walk(Z3PY_SRC_DIR):
for f in files:
if f.endswith('.pyc'):
rmf(os.path.join(root, f))
# Compile Z3Py files
if compileall.compile_dir(Z3PY_SRC_DIR, force=1) != 1:
raise MKException("failed to compile Z3Py sources")
# Copy sources to build
for py in filter(lambda f: f.endswith('.py'), os.listdir(Z3PY_SRC_DIR)):
shutil.copyfile(os.path.join(Z3PY_SRC_DIR, py), os.path.join(BUILD_DIR, py))
if is_verbose():
print("Copied '%s'" % py)
# Python 2.x support
for pyc in filter(lambda f: f.endswith('.pyc'), os.listdir(Z3PY_SRC_DIR)):
shutil.copyfile(os.path.join(Z3PY_SRC_DIR, pyc), os.path.join(BUILD_DIR, pyc))
if is_verbose():
print("Generated '%s'" % pyc)
# Python 3.x support
src_pycache = os.path.join(Z3PY_SRC_DIR, '__pycache__')
if os.path.exists(src_pycache):
for pyc in filter(lambda f: f.endswith('.pyc'), os.listdir(src_pycache)):
target_pycache = os.path.join(BUILD_DIR, '__pycache__')
mk_dir(target_pycache)
shutil.copyfile(os.path.join(src_pycache, pyc), os.path.join(target_pycache, pyc))
if is_verbose():
print("Generated '%s'" % pyc)
def mk_bindings(api_files):
if not ONLY_MAKEFILES:
mk_z3consts_py(api_files)
mk_z3consts_dotnet(api_files)
new_api_files = []
api = get_component(API_COMPONENT)
for api_file in api_files:
api_file_path = api.find_file(api_file, api.name)
new_api_files.append(os.path.join(api_file_path.src_dir, api_file))
g = globals()
g["API_FILES"] = new_api_files
if is_java_enabled():
check_java()
mk_z3consts_java(api_files)
_execfile(os.path.join('scripts', 'update_api.py'), g) # HACK
cp_z3py_to_build()
if is_ml_enabled():
check_ml()
mk_z3consts_ml(api_files)
# Extract enumeration types from API files, and add python definitions.
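# Illustrative sketch (abridged, not a verbatim excerpt of z3_api.h): a declaration
# shaped like
#   typedef enum
#   {
#       Z3_L_FALSE = -1,
#       Z3_L_UNDEF,
#       Z3_L_TRUE
#   } Z3_lbool;
# is turned by mk_z3consts_py into assignments such as
#   # enum Z3_lbool
#   Z3_L_FALSE = -1
#   Z3_L_UNDEF = 0
#   Z3_L_TRUE = 1
# (the order of the generated assignments follows the dict iteration order).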
def mk_z3consts_py(api_files):
    if Z3PY_SRC_DIR is None:
        raise MKException("You must invoke set_z3py_dir(path) first.")
blank_pat = re.compile("^ *$")
comment_pat = re.compile("^ *//.*$")
typedef_pat = re.compile("typedef enum *")
typedef2_pat = re.compile("typedef enum { *")
openbrace_pat = re.compile("{ *")
closebrace_pat = re.compile("}.*;")
z3consts = open(os.path.join(Z3PY_SRC_DIR, 'z3consts.py'), 'w')
z3consts.write('# Automatically generated file\n\n')
api_dll = get_component(Z3_DLL_COMPONENT)
for api_file in api_files:
api_file_c = api_dll.find_file(api_file, api_dll.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
api = open(api_file, 'r')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid %s, line: %s" % (api_file, linenum)
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
z3consts.write('# enum %s\n' % name)
for k in decls:
i = decls[k]
z3consts.write('%s = %s\n' % (k, i))
z3consts.write('\n')
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
if VERBOSE:
print("Generated '%s'" % os.path.join(Z3PY_SRC_DIR, 'z3consts.py'))
# Extract enumeration types from z3_api.h, and add .Net definitions
def mk_z3consts_dotnet(api_files):
blank_pat = re.compile("^ *$")
comment_pat = re.compile("^ *//.*$")
typedef_pat = re.compile("typedef enum *")
typedef2_pat = re.compile("typedef enum { *")
openbrace_pat = re.compile("{ *")
closebrace_pat = re.compile("}.*;")
dotnet = get_component(DOTNET_COMPONENT)
DeprecatedEnums = [ 'Z3_search_failure' ]
z3consts = open(os.path.join(dotnet.src_dir, 'Enumerations.cs'), 'w')
z3consts.write('// Automatically generated file\n\n')
z3consts.write('using System;\n\n'
'#pragma warning disable 1591\n\n'
'namespace Microsoft.Z3\n'
'{\n');
for api_file in api_files:
api_file_c = dotnet.find_file(api_file, dotnet.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
api = open(api_file, 'r')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid %s, line: %s" % (api_file, linenum)
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
if name not in DeprecatedEnums:
z3consts.write(' /// <summary>%s</summary>\n' % name)
z3consts.write(' public enum %s {\n' % name)
for k in decls:
i = decls[k]
z3consts.write(' %s = %s,\n' % (k, i))
z3consts.write(' }\n\n')
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
z3consts.write('}\n');
if VERBOSE:
print("Generated '%s'" % os.path.join(dotnet.src_dir, 'Enumerations.cs'))
# Extract enumeration types from z3_api.h, and add Java definitions
def mk_z3consts_java(api_files):
blank_pat = re.compile("^ *$")
comment_pat = re.compile("^ *//.*$")
typedef_pat = re.compile("typedef enum *")
typedef2_pat = re.compile("typedef enum { *")
openbrace_pat = re.compile("{ *")
closebrace_pat = re.compile("}.*;")
java = get_component(JAVA_COMPONENT)
DeprecatedEnums = [ 'Z3_search_failure' ]
gendir = os.path.join(java.src_dir, "enumerations")
if not os.path.exists(gendir):
os.mkdir(gendir)
for api_file in api_files:
api_file_c = java.find_file(api_file, java.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
api = open(api_file, 'r')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid %s, line: %s" % (api_file, linenum)
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
if name not in DeprecatedEnums:
efile = open('%s.java' % os.path.join(gendir, name), 'w')
efile.write('/**\n * Automatically generated file\n **/\n\n')
efile.write('package %s.enumerations;\n\n' % java.package_name);
efile.write('/**\n')
efile.write(' * %s\n' % name)
efile.write(' **/\n')
efile.write('public enum %s {\n' % name)
first = True
for k in decls:
i = decls[k]
if first:
first = False
else:
efile.write(',\n')
efile.write(' %s (%s)' % (k, i))
efile.write(";\n")
efile.write('\n private final int intValue;\n\n')
efile.write(' %s(int v) {\n' % name)
efile.write(' this.intValue = v;\n')
efile.write(' }\n\n')
efile.write(' public static final %s fromInt(int v) {\n' % name)
efile.write(' for (%s k: values()) \n' % name)
efile.write(' if (k.intValue == v) return k;\n')
efile.write(' return values()[0];\n')
efile.write(' }\n\n')
efile.write(' public final int toInt() { return this.intValue; }\n')
# efile.write(';\n %s(int v) {}\n' % name)
efile.write('}\n\n')
efile.close()
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
if VERBOSE:
print("Generated '%s'" % ('%s' % gendir))
# Extract enumeration types from z3_api.h, and add ML definitions
def mk_z3consts_ml(api_files):
blank_pat = re.compile("^ *$")
comment_pat = re.compile("^ *//.*$")
typedef_pat = re.compile("typedef enum *")
typedef2_pat = re.compile("typedef enum { *")
openbrace_pat = re.compile("{ *")
closebrace_pat = re.compile("}.*;")
ml = get_component(ML_COMPONENT)
DeprecatedEnums = [ 'Z3_search_failure' ]
gendir = ml.src_dir
if not os.path.exists(gendir):
os.mkdir(gendir)
efile = open('%s.ml' % os.path.join(gendir, "z3enums"), 'w')
efile.write('(* Automatically generated file *)\n\n')
efile.write('(** The enumeration types of Z3. *)\n\n')
for api_file in api_files:
api_file_c = ml.find_file(api_file, ml.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
api = open(api_file, 'r')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid %s, line: %s" % (api_file, linenum)
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
if name not in DeprecatedEnums:
efile.write('(** %s *)\n' % name[3:])
efile.write('type %s =\n' % name[3:]) # strip Z3_
for k, i in decls.items():
efile.write(' | %s \n' % k[3:]) # strip Z3_
efile.write('\n')
efile.write('(** Convert %s to int*)\n' % name[3:])
efile.write('let int_of_%s x : int =\n' % (name[3:])) # strip Z3_
efile.write(' match x with\n')
for k, i in decls.items():
efile.write(' | %s -> %d\n' % (k[3:], i))
efile.write('\n')
efile.write('(** Convert int to %s*)\n' % name[3:])
efile.write('let %s_of_int x : %s =\n' % (name[3:],name[3:])) # strip Z3_
efile.write(' match x with\n')
for k, i in decls.items():
efile.write(' | %d -> %s\n' % (i, k[3:]))
# use Z3.Exception?
efile.write(' | _ -> raise (Failure "undefined enum value")\n\n')
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
if VERBOSE:
print ('Generated "%s/z3enums.ml"' % ('%s' % gendir))
efile = open('%s.mli' % os.path.join(gendir, "z3enums"), 'w')
efile.write('(* Automatically generated file *)\n\n')
efile.write('(** The enumeration types of Z3. *)\n\n')
for api_file in api_files:
api_file_c = ml.find_file(api_file, ml.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
api = open(api_file, 'r')
SEARCHING = 0
FOUND_ENUM = 1
IN_ENUM = 2
mode = SEARCHING
decls = {}
idx = 0
linenum = 1
for line in api:
m1 = blank_pat.match(line)
m2 = comment_pat.match(line)
if m1 or m2:
# skip blank lines and comments
linenum = linenum + 1
elif mode == SEARCHING:
m = typedef_pat.match(line)
if m:
mode = FOUND_ENUM
m = typedef2_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
elif mode == FOUND_ENUM:
m = openbrace_pat.match(line)
if m:
mode = IN_ENUM
decls = {}
idx = 0
else:
assert False, "Invalid %s, line: %s" % (api_file, linenum)
else:
assert mode == IN_ENUM
words = re.split('[^\-a-zA-Z0-9_]+', line)
m = closebrace_pat.match(line)
if m:
name = words[1]
if name not in DeprecatedEnums:
efile.write('(** %s *)\n' % name[3:])
efile.write('type %s =\n' % name[3:]) # strip Z3_
for k, i in decls.items():
efile.write(' | %s \n' % k[3:]) # strip Z3_
efile.write('\n')
efile.write('(** Convert %s to int*)\n' % name[3:])
efile.write('val int_of_%s : %s -> int\n' % (name[3:], name[3:])) # strip Z3_
efile.write('(** Convert int to %s*)\n' % name[3:])
efile.write('val %s_of_int : int -> %s\n' % (name[3:],name[3:])) # strip Z3_
efile.write('\n')
mode = SEARCHING
else:
if words[2] != '':
if len(words[2]) > 1 and words[2][1] == 'x':
idx = int(words[2], 16)
else:
idx = int(words[2])
decls[words[1]] = idx
idx = idx + 1
linenum = linenum + 1
if VERBOSE:
print ('Generated "%s/z3enums.mli"' % ('%s' % gendir))
def mk_gui_str(id):
return '4D2F40D8-E5F9-473B-B548-%012d' % id
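# For example, mk_gui_str(0) yields '4D2F40D8-E5F9-473B-B548-000000000000', which
# mk_vs_proj below uses as the ProjectGuid.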
def mk_vs_proj(name, components):
if not VS_PROJ:
return
proj_name = '%s.vcxproj' % os.path.join(BUILD_DIR, name)
modes=['Debug', 'Release']
PLATFORMS=['Win32']
f = open(proj_name, 'w')
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')
f.write(' <ItemGroup Label="ProjectConfigurations">\n')
f.write(' <ProjectConfiguration Include="Debug|Win32">\n')
f.write(' <Configuration>Debug</Configuration>\n')
f.write(' <Platform>Win32</Platform>\n')
f.write(' </ProjectConfiguration>\n')
f.write(' <ProjectConfiguration Include="Release|Win32">\n')
f.write(' <Configuration>Release</Configuration>\n')
f.write(' <Platform>Win32</Platform>\n')
f.write(' </ProjectConfiguration>\n')
f.write(' </ItemGroup>\n')
f.write(' <PropertyGroup Label="Globals">\n')
f.write(' <ProjectGuid>{%s}</ProjectGuid>\n' % mk_gui_str(0))
f.write(' <ProjectName>%s</ProjectName>\n' % name)
f.write(' <Keyword>Win32Proj</Keyword>\n')
f.write(' </PropertyGroup>\n')
f.write(' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\n')
f.write(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'" Label="Configuration">\n')
f.write(' <ConfigurationType>Application</ConfigurationType>\n')
f.write(' <CharacterSet>Unicode</CharacterSet>\n')
f.write(' <UseOfMfc>false</UseOfMfc>\n')
f.write(' </PropertyGroup>\n')
f.write(' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\n')
f.write(' <ImportGroup Label="ExtensionSettings">\n')
f.write(' </ImportGroup>\n')
f.write(' <ImportGroup Label="PropertySheets">\n')
f.write(' <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists(\'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props\')" Label="LocalAppDataPlatform" /> </ImportGroup>\n')
f.write(' <PropertyGroup Label="UserMacros" />\n')
f.write(' <PropertyGroup>\n')
f.write(' <OutDir Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">$(SolutionDir)$(Configuration)\</OutDir>\n')
f.write(' <TargetName Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">%s</TargetName>\n' % name)
f.write(' <TargetExt Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">.exe</TargetExt>\n')
f.write(' <OutDir Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">$(SolutionDir)$(Configuration)\</OutDir>\n')
f.write(' <TargetName Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">%s</TargetName>\n' % name)
f.write(' <TargetExt Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">.exe</TargetExt>\n')
f.write(' </PropertyGroup>\n')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Debug|Win32\'">\n')
f.write(' <ClCompile>\n')
f.write(' <Optimization>Disabled</Optimization>\n')
f.write(' <PreprocessorDefinitions>WIN32;_DEBUG;Z3DEBUG;_TRACE;_MP_INTERNAL;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n')
if VS_PAR:
f.write(' <MinimalRebuild>false</MinimalRebuild>\n')
f.write(' <MultiProcessorCompilation>true</MultiProcessorCompilation>\n')
else:
f.write(' <MinimalRebuild>true</MinimalRebuild>\n')
f.write(' <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n')
f.write(' <WarningLevel>Level3</WarningLevel>\n')
f.write(' <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n')
f.write(' <OpenMPSupport>true</OpenMPSupport>\n')
f.write(' <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n')
f.write(' <AdditionalIncludeDirectories>')
deps = find_all_deps(name, components)
first = True
for dep in deps:
if first:
first = False
else:
f.write(';')
f.write(get_component(dep).to_src_dir)
f.write('</AdditionalIncludeDirectories>\n')
f.write(' </ClCompile>\n')
f.write(' <Link>\n')
f.write(' <OutputFile>$(OutDir)%s.exe</OutputFile>\n' % name)
f.write(' <GenerateDebugInformation>true</GenerateDebugInformation>\n')
f.write(' <SubSystem>Console</SubSystem>\n')
f.write(' <StackReserveSize>8388608</StackReserveSize>\n')
f.write(' <RandomizedBaseAddress>false</RandomizedBaseAddress>\n')
f.write(' <DataExecutionPrevention>\n')
f.write(' </DataExecutionPrevention>\n')
f.write(' <TargetMachine>MachineX86</TargetMachine>\n')
f.write(' <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n')
f.write('<AdditionalDependencies>psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n')
f.write(' </Link>\n')
f.write(' </ItemDefinitionGroup>\n')
f.write(' <ItemDefinitionGroup Condition="\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">\n')
f.write(' <ClCompile>\n')
f.write(' <Optimization>Disabled</Optimization>\n')
f.write(' <PreprocessorDefinitions>WIN32;_NDEBUG;_MP_INTERNAL;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n')
if VS_PAR:
f.write(' <MinimalRebuild>false</MinimalRebuild>\n')
f.write(' <MultiProcessorCompilation>true</MultiProcessorCompilation>\n')
else:
f.write(' <MinimalRebuild>true</MinimalRebuild>\n')
f.write(' <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n')
f.write(' <WarningLevel>Level3</WarningLevel>\n')
f.write(' <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n')
f.write(' <OpenMPSupport>true</OpenMPSupport>\n')
f.write(' <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n')
f.write(' <AdditionalIncludeDirectories>')
deps = find_all_deps(name, components)
first = True
for dep in deps:
if first:
first = False
else:
f.write(';')
f.write(get_component(dep).to_src_dir)
f.write('</AdditionalIncludeDirectories>\n')
f.write(' </ClCompile>\n')
f.write(' <Link>\n')
f.write(' <OutputFile>$(OutDir)%s.exe</OutputFile>\n' % name)
f.write(' <GenerateDebugInformation>true</GenerateDebugInformation>\n')
f.write(' <SubSystem>Console</SubSystem>\n')
f.write(' <StackReserveSize>8388608</StackReserveSize>\n')
f.write(' <RandomizedBaseAddress>false</RandomizedBaseAddress>\n')
f.write(' <DataExecutionPrevention>\n')
f.write(' </DataExecutionPrevention>\n')
f.write(' <TargetMachine>MachineX86</TargetMachine>\n')
f.write(' <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n')
f.write('<AdditionalDependencies>psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n')
f.write(' </Link>\n')
f.write(' </ItemDefinitionGroup>\n')
f.write(' <ItemGroup>\n')
for dep in deps:
dep = get_component(dep)
for cpp in filter(lambda f: f.endswith('.cpp'), os.listdir(dep.src_dir)):
f.write(' <ClCompile Include="%s" />\n' % os.path.join(dep.to_src_dir, cpp))
f.write(' </ItemGroup>\n')
f.write(' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\n')
f.write(' <ImportGroup Label="ExtensionTargets">\n')
f.write(' </ImportGroup>\n')
f.write('</Project>\n')
if is_verbose():
print("Generated '%s'" % proj_name)
def mk_win_dist(build_path, dist_path):
for c in get_components():
c.mk_win_dist(build_path, dist_path)
# Add Z3Py to bin directory
print("Adding to %s\n" % dist_path)
for pyc in filter(lambda f: f.endswith('.pyc') or f.endswith('.py'), os.listdir(build_path)):
shutil.copy(os.path.join(build_path, pyc),
os.path.join(dist_path, 'bin', pyc))
def mk_unix_dist(build_path, dist_path):
for c in get_components():
c.mk_unix_dist(build_path, dist_path)
# Add Z3Py to bin directory
for pyc in filter(lambda f: f.endswith('.pyc') or f.endswith('.py'), os.listdir(build_path)):
shutil.copy(os.path.join(build_path, pyc),
os.path.join(dist_path, 'bin', pyc))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
traiansf/z3-java
|
scripts/mk_util.py
|
Python
|
mit
| 123,782
|
[
"cclib"
] |
698889b8287ace66e5a181923dd736993473bc8a54df4823afd19538e94a615e
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Test feature reader and TICA with a set of cosine time series.
@author: Fabian Paul
'''
from __future__ import print_function
from __future__ import absolute_import
import unittest
import os
import tempfile
import numpy as np
import mdtraj
from pyemma.coordinates import api
from pyemma.coordinates.data.feature_reader import FeatureReader
from logging import getLogger
log = getLogger('pyemma.'+'TestFeatureReaderAndTICA')
class TestFeatureReaderAndTICA(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dim = 9 # dimension (must be divisible by 3)
N = 50000 # length of single trajectory # 500000
N_trajs = 10 # number of trajectories
cls.w = 2.0*np.pi*1000.0/N # have 1000 cycles in each trajectory
# get random amplitudes and phases
cls.A = np.random.randn(cls.dim)
cls.phi = np.random.random_sample((cls.dim,))*np.pi*2.0
mean = np.random.randn(cls.dim)
# create topology file
cls.temppdb = tempfile.mktemp('.pdb')
with open(cls.temppdb, 'w') as f:
for i in range(cls.dim//3):
print(('ATOM %5d C ACE A 1 28.490 31.600 33.379 0.00 1.00' % i), file=f)
t = np.arange(0, N)
t_total = 0
cls.trajnames = [] # list of xtc file names
for i in range(N_trajs):
# set up data
data = cls.A*np.cos((cls.w*(t+t_total))[:, np.newaxis]+cls.phi) + mean
xyz = data.reshape((N, cls.dim//3, 3))
# create trajectory file
traj = mdtraj.load(cls.temppdb)
traj.xyz = xyz
traj.time = t
tempfname = tempfile.mktemp('.xtc')
traj.save(tempfname)
cls.trajnames.append(tempfname)
t_total += N
@classmethod
def tearDownClass(cls):
for fname in cls.trajnames:
os.unlink(fname)
os.unlink(cls.temppdb)
super(TestFeatureReaderAndTICA, cls).tearDownClass()
def test_covariances_and_eigenvalues(self):
reader = FeatureReader(self.trajnames, self.temppdb, chunksize=10000)
for lag in [1, 11, 101, 1001, 2001]: # avoid cos(w*tau)==0
trans = api.tica(data=reader, dim=self.dim, lag=lag)
log.info('number of trajectories reported by tica %d' % trans.number_of_trajectories())
log.info('tau = %d corresponds to a number of %f cycles' % (lag, self.w*lag/(2.0*np.pi)))
# analytical solution for C_ij(lag) is 0.5*A[i]*A[j]*cos(phi[i]-phi[j])*cos(w*lag)
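            # Reasoning sketch: with x_i(t) = A_i*cos(w*t+phi_i) + mean_i, the
            # product-to-sum identity gives a time average of the mean-free product
            # x_i(t)*x_j(t+lag) equal to 0.5*A_i*A_j*cos(phi_i-phi_j-w*lag); averaging
            # the symmetrized (i,j)/(j,i) combination (as a reversible estimator does)
            # yields the 0.5*A_i*A_j*cos(phi_i-phi_j)*cos(w*lag) used below.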
ana_cov = 0.5*self.A[:, np.newaxis]*self.A*np.cos(self.phi[:, np.newaxis]-self.phi)
ana_cov_tau = ana_cov*np.cos(self.w*lag)
self.assertTrue(np.allclose(ana_cov, trans.cov, atol=1.E-3))
self.assertTrue(np.allclose(ana_cov_tau, trans.cov_tau, atol=1.E-3))
log.info('max. eigenvalue: %f' % np.max(trans.eigenvalues))
self.assertTrue(np.all(trans.eigenvalues <= 1.0))
def test_partial_fit(self):
reader = FeatureReader(self.trajnames, self.temppdb, chunksize=10000)
output = reader.get_output()
params = {'dim': self.dim, 'lag': 1001}
ref = api.tica(reader, **params)
partial = api.tica(**params)
for traj in output:
partial.partial_fit(traj)
np.testing.assert_allclose(partial.eigenvalues, ref.eigenvalues, atol=1e-3)
# only compare first two eigenvectors, because we only have two metastable processes
np.testing.assert_allclose(np.abs(partial.eigenvectors[:2]),
np.abs(ref.eigenvectors[:2]), rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
unittest.main()
|
marscher/PyEMMA
|
pyemma/coordinates/tests/test_featurereader_and_tica.py
|
Python
|
lgpl-3.0
| 4,496
|
[
"MDTraj"
] |
0421315e8b1d0ae8c2975c090ce1b9b81a69718ff29aac52accad7c163dc797d
|
import os
from os.path import dirname, join
from collections import OrderedDict
import pandas as pd
import numpy as np
import json
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column, gridplot, layout
from bokeh.models import Select, Div, Column, \
HoverTool, ColumnDataSource, Button, RadioButtonGroup,\
MultiSelect
#from bokeh.models.widgets import RangeSlider
from bokeh.plotting import figure
from bokeh.sampledata.periodic_table import elements
from precision.precisions import DatabaseData
import requests
plottables = ['k-point', 'value', 'perc_precisions']
x_select = Select(title='X-Axis', value='k-point', options=plottables)
y_select = Select(title='Y-Axis', value='value', options=plottables)
############## Header Content from description.html #################
content_filename1 = join(dirname(__file__), "ptable.html")
description1 = Div(text=open(content_filename1).read(),
render_as_text=False, width=600)
content_filename2 = join(dirname(__file__), "UserInstructions.html")
description2 = Div(text=open(content_filename2).read(),
render_as_text=False, width=600)
# periodic table widget
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
print("Table---")
#print(elements.period)
print("---Table")
try:
elements["period"] = [romans[x-1] for x in elements.period]
except:
pass
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
elems_colorpair = {'H':'nc','He':'nc',
'Li':'nc','Be':'nc','B':'nc','C':'nc', 'N':'nc', 'O':'nc','F':'nc','Ne':'nc',
'Na':'nc','Mg':'nc', 'Al':'c','Si':'nc','P':'nc','S':'nc','Cl':'nc','Ar':'nc',
'K': 'nc', 'Ca':'nc','Sc':'c', 'Ti':'c' ,'V':'c' , 'Cr':'c', 'Mn':'c', 'Fe':'c', 'Co':'c', 'Ni':'c', 'Cu':'c', 'Zn':'c',
'Rb':'nc', 'Sr':'nc','Y':'c', 'Zr':'c', 'Nb':'c', 'Mo':'c', 'Tc':'c', 'Ru':'c', 'Rh':'c', 'Pd':'c', 'Ag':'c','Cd': 'c',
'Cs':'nc', 'Ba':'nc', 'Hf':'c', 'Ta':'c', 'W':'c', 'Re':'c', 'Os':'c', 'Ir':'c', 'Pt':'c', 'Au':'c', 'Hg':'c'
}
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
# mass=elements["atomic mass"],
B=['B' for x in elements["atomic mass"]],
dB=['dB' for x in elements["atomic mass"]],
V0=['V0' for x in elements["atomic mass"]],
E0=['E0' for x in elements["atomic mass"]],
# type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
#name = source.data["name"]
#B = source.data["B"]
# Display Table
#ptable1 = figure(title="Periodic Table", tools="hover",
# x_range=group_range, y_range=list(reversed(romans)))
#ptable1.plot_width = 1500
#ptable1.toolbar_location = None
#ptable1.outline_line_color = None
#ptable1.background_fill_color = 'white'
#ptable1.rect("group", "period", 0.9, 0.9, source=source,
# fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
#ptable1.text(x="symx", y="period", text="sym",
# text_font_style="bold", text_font_size="22pt", **text_props)
#ptable1.text(x="symx", y="numbery", text="atomic_number",
# text_font_size="9pt", **text_props)
#ptable1.grid.grid_line_color = None
#ptable1.select_one(HoverTool).tooltips = [
# ("name", "@name"),
# ("V0 (A^3 per atom)", "@V0"),
# ("B (GPa)", "@B"),
# ("dB/dP", "@dB")
#]
#Interactive table
ptable2 = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable2.plot_width = 1500
ptable2.toolbar_location = None
ptable2.outline_line_color = None
ptable2.background_fill_color = 'white'
ptable2.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
ptable2.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable2.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
ptable2.grid.grid_line_color = None
ptable2.select_one(HoverTool).tooltips = [
("name", "@name"),
("V0 (A^3 per atom)", "@V0"),
("B (GPa)", "@B"),
("dB/dP", "@dB")
]
######### CREATES CROSSFILTER ##########################
# decide if all columns or crossfilter down to sub properties
#source_data = pd.DataFrame({})#ColumnDataSource(data=dict())
class CrossFiltDFs():
    def __init__(self, query_dict=None, plot_data=None):
        # use None as the default to avoid sharing one mutable dict between instances
        if query_dict is None:
            query_dict = {'code': 'VASP', 'exchange': 'PBE', 'element': 'Au',
                          'structure': 'fcc', 'properties': 'B'}
        self.query_dict = query_dict
        self.plot_data = plot_data
    def crossfilter_by_tag(self, df, tag):
        """
        A crossfilter that can recursively update the unique options
        in the UI based on prior selections.
        Returns df filtered down to the rows matching the given tag,
        e.g. {'element': 'Ag'}.
        """
        col, spec = list(tag.items())[0]
        return df[df[col] == spec]
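    # Illustrative usage (hypothetical dataframe 'df' with an 'element' column):
    #   ag_rows = self.crossfilter_by_tag(df, {'element': 'Ag'})
    # Repeated calls with different single-entry tags narrow the selection one
    # column at a time.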
def query_ptable_api(self,endpoint):
r = requests.post(url='http://0.0.0.0:6400/bench/v1/query_{}'.\
format(endpoint),data=json.dumps(self.query_dict))
ListOfDicts = r.json()['content']
self.plot_data = pd.concat([pd.DataFrame({k:[ld[k]] for k in list(ld.keys())}) for ld in ListOfDicts])
def query_api(self,endpoint):
query_dict ={k:v for k,v in self.query_dict.items() if k!='properties'}
self.properties = self.query_dict['properties']
if self.properties == 'dB':
self.properties = 'BP'
r = requests.post(url='http://0.0.0.0:6400/bench/v1/query_{}'.\
format(endpoint),data=json.dumps(self.query_dict))
ListOfDicts = r.json()['content']
self.plot_data = pd.concat([pd.DataFrame({k:[ld[k]] for k in list(ld.keys())}) for ld in ListOfDicts])
def create_figure_new(self):
"""
        Create a new empty placeholder figure canvas.
"""
kw = {}
self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)
self.p.circle(x=[0],y=[0])
def update_ptable(self):
"""
update the periodic table highlighted elements
"""
from bokeh.sampledata.periodic_table import elements
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
elements["period"] = [x for x in elements.period]
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
print ('reaches colormap def')
colormap = {
"c" : "#ffa07a",
"nc" : "#A9A9A9"
}
elems_colorpair = {}
fcc_B_extrapol_props = {}
fcc_dB_extrapol_props = {}
fcc_V0_extrapol_props = {}
fcc_E0_extrapol_props = {}
bcc_B_extrapol_props = {}
bcc_dB_extrapol_props = {}
bcc_V0_extrapol_props = {}
bcc_E0_extrapol_props = {}
hcp_B_extrapol_props = {}
hcp_dB_extrapol_props = {}
hcp_V0_extrapol_props = {}
hcp_E0_extrapol_props = {}
available_elems = []
for e in elements["symbol"]:
if e in np.unique(list(self.plot_data['element'])):
available_elems.append(e)
for s in np.unique(list(self.plot_data['structure'])):
plot_struct = self.plot_data[self.plot_data['structure']==s]
plot_struct_elem = plot_struct[plot_struct['element']==e]
if s=='fcc':
try:
fcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
fcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
fcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
fcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
elif s=='bcc':
try:
bcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
bcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
bcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
bcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
elif s=='hcp':
try:
hcp_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})
hcp_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})
hcp_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})
hcp_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})
except:
pass
fcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_E0_extrapol_props})
fcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_V0_extrapol_props})
fcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_B_extrapol_props})
fcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_dB_extrapol_props})
bcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_E0_extrapol_props})
bcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_V0_extrapol_props})
bcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_B_extrapol_props})
bcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_dB_extrapol_props})
hcp_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_E0_extrapol_props})
hcp_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_V0_extrapol_props})
hcp_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_B_extrapol_props})
hcp_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_dB_extrapol_props})
elems_colorpair.update( { key:'c' for key in np.unique(available_elems) } )
elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )
print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
# cpk=elements["CPK"],
atomic_number=elements["atomic number"],
# electronic=elements["electronic configuration"],
fcc_B=[fcc_B_extrapol_props[x] for x in elements["symbol"]],
fcc_dB=[fcc_dB_extrapol_props[x] for x in elements["symbol"]],
fcc_V0=[fcc_V0_extrapol_props[x] for x in elements["symbol"]],
fcc_E0=[fcc_E0_extrapol_props[x] for x in elements["symbol"]],
bcc_B=[bcc_B_extrapol_props[x] for x in elements["symbol"]],
bcc_dB=[bcc_dB_extrapol_props[x] for x in elements["symbol"]],
bcc_V0=[bcc_V0_extrapol_props[x] for x in elements["symbol"]],
bcc_E0=[bcc_E0_extrapol_props[x] for x in elements["symbol"]],
hcp_B=[hcp_B_extrapol_props[x] for x in elements["symbol"]],
hcp_dB=[hcp_dB_extrapol_props[x] for x in elements["symbol"]],
hcp_V0=[hcp_V0_extrapol_props[x] for x in elements["symbol"]],
hcp_E0=[hcp_E0_extrapol_props[x] for x in elements["symbol"]],
type=elements["metal"],
type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],
)
)
# plot the periodic layout
#name = source.data["name"]
#B = source.data["B"]
ptable = figure(title="Periodic Table", tools="hover",
x_range=group_range, y_range=list(reversed(romans)))
ptable.background_fill_color='white'
ptable.plot_width = 1500
ptable.toolbar_location = None
ptable.outline_line_color = None
ptable.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.3, color='type_color')
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
ptable.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="22pt", **text_props)
ptable.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
# ptable.text(x="symx", y="namey", text="name",
# text_font_size="6pt", **text_props)
# ptable.text(x="symx", y="massy", text="mass",
# text_font_size="5pt", **text_props)
ptable.grid.grid_line_color = None
ptable.select_one(HoverTool).tooltips = [
("name", "@name"),
("fcc, V0 (A^3 per atom)", "@fcc_V0"),
("fcc, B (GPa)", "@fcc_B"),
("fcc, dB/dP", "@fcc_dB"),
("bcc, V0 (A^3 per atom)", "@bcc_V0"),
("bcc, B (GPa)", "@bcc_B"),
("bcc, dB/dP", "@bcc_dB"),
("hcp, V0 (A^3 per atom)", "@hcp_V0"),
("hcp, B (GPa)", "@hcp_B"),
("hcp, dB/dP", "@hcp_dB")]
return ptable
# The crossfilter widgets
def update(self, attr, old, new):
print ('Attribute', attr, 'OLD', old, 'NEW', new)
print ('executes here on update')#, exchange_df)
def update_code(self):
"""
update for the code selection
"""
print ('update code')
self.query_dict.update({'code':code.value})
def update_exchange(self):
"""
update the exchange
"""
print ('update exchange')
self.query_dict.update({'exchange':exchange.value})
    def update_element(self):
        print ('Updating element down selection to', element.value)
        self.query_dict.update({'element':element.value})
def update_struct(self):
print ('Updating struct down selection for element')
self.query_dict.update({'structure':struct.value})
print ('Updating ptable with structure selection')
print ('finished callback to update layout')
def update_prop(self):
self.properties = prop.value
def plot_prec_value1(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
# self.query_dict={'code':code.value,'exchange':exchange.value,\
# 'structure':struct.value,'element':element.value,'properties':prop.value}
# print ('POSTING', self.query_dict)
# self.query_api(endpoint='evk')
#layout_doc.children[4].children[0] = self.plot_pade_figure()
self.query_dict={'code':code.value,'exchange':exchange.value,\
'structure':struct.value,'element':element.value,'properties':prop.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[4].children[0] = self.plot_pade_figure()
def clear_crossfilter1(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[4].children[0] = self.p
def plot_prec_value2(self):
"""
calls the plotting operation by querying the
evk endpoint and returning a single evk packet
of single material structure code exchange to
self.plot_data.
This controls the first plot canvas
"""
self.query_dict={'code':code2.value,'exchange':exchange2.value,\
'structure':struct2.value,'element':element2.value,'properties':prop2.value}
print ('POSTING', self.query_dict)
self.query_api(endpoint='evk')
layout_doc.children[4].children[1] = self.plot_pade_figure()
def clear_crossfilter2(self):
"""
clear the figure and crossfilter
"""
print ('Trigger clear')
self.query_dict = {}
self.plot_data = None
self.create_figure_new()
layout_doc.children[4].children[1] = self.p
def plot_pade_figure(self):
"""
method which plots multiple curves of different color
on the same bokeh figure canvas. Will receive query results from the evk
end point on the E0k, V0k, Bk, BPk, kpoints data. x is always kpoints data log scaled
"""
data_analysis = DatabaseData(dataframe=self.plot_data)
print (data_analysis.dataframe.columns)
data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)
data_analysis.create_precisions()
data_analysis.extract_pade_curve()
x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \
data_analysis.create_pade_bokeh_compat(properties=self.properties)
print (type(self.properties), self.properties)
if self.properties == 'B':
ext = data_analysis.Bp
print ('HERE AT PROPERTIES', ext, type(ext))
elif self.properties == 'BP':
ext = data_analysis.BPp
elif self.properties == 'E0':
ext = data_analysis.E0p
elif self.properties == 'V0':
ext = data_analysis.V0p
p = figure(plot_height=400, plot_width=400,tools="pan,wheel_zoom,box_zoom,reset,previewsave",\
x_axis_type="log", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )
p.xaxis.axis_label = 'K-points per atom'
p.line(x_pade_kpts, y_pade, color='red')
p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)
p.multi_line(xs_err, ys_err, color='black')
if self.properties == 'B':
p.yaxis.axis_label = 'Bulk Modulus B (GPa)'
elif self.properties == 'dB':
p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'
elif self.properties == 'E0':
p.yaxis.axis_label = 'DFT Energy (eV/atom)'
elif self.properties == 'V0':
p.yaxis.axis_label = 'Volume (A^3/atom)'
return p
def plot_precision_figure(self):
"""
method which plots multiple curves of different color
on the same bokeh figure canvas. Will receive query results from the evk
end point on the E0k, V0k, Bk, BPk, kpoints data. x is always kpoints data log scaled
"""
data_analysis = DatabaseData(dataframe=self.plot_data)
prop_data, energy_data, M, C, pred_energy, pred_property = \
data_analysis.create_precision_bokeh_compat(self.prop_data, self.energy_data, properties=self.properties)
p = figure(plot_height=400, plot_width=400,tools="pan,wheel_zoom,box_zoom,reset,previewsave",\
x_axis_type="log", y_axis_type="log", x_axis_label='Energy Convergence (meV/atom)', title='Slope M is {0}'.format(str(M)) )
p.line(pred_energy, pred_property, color='red')
p.circle(self.energy_data, self.prop_data, color='blue',size=5, line_alpha=0)
#p.multi_line(xs_err, ys_err, color='black')
if self.properties == 'B':
p.yaxis.axis_label = 'Bulk Modulus B (%)'
elif self.properties == 'dB':
p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative (%)'
elif self.properties == 'Multiple':
p.yaxis.axis_label = "V0, B, B' (%)"
elif self.properties == 'V0':
p.yaxis.axis_label = 'Volume (%)'
return p
def multi_precisions_correlate1(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code.value,'exchange':exchange.value,\
'structure':struct.value,'element':element.value,'properties':prop.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data['s{}k'.format(self.properties)]
            self.energy_data = self.plot_data['sE0k']
layout_doc.children[4].children[0] = self.plot_precision_figure()
pass
def multi_precisions_correlate2(self):
"""
method which allows the user to plot various precisions
against each other. Also prints out the M-value and intercepts
of precision at 1 meV/atom, 0.1 meV/atom and 0.01 meV/atom
"""
self.query_dict={'code':code2.value,'exchange':exchange2.value,\
'structure':struct2.value,'element':element2.value,'properties':prop2.value}
print ('POSTING', self.query_dict)
if not self.query_dict['properties'] == 'Multi':
self.query_api(endpoint='precvalue')
self.prop_data = self.plot_data['s{}k'.format(self.properties)]
            self.energy_data = self.plot_data['sE0k']
layout_doc.children[4].children[1] = self.plot_precision_figure()
## PTABLE
CF = CrossFiltDFs(query_dict={'code':'VASP','exchange':'PBE'})
# first query for the periodic table data
CF.query_ptable_api(endpoint='extrapolate')
print (CF.plot_data)
# for the first table to display VASP PBE all structures Pade extrapolates for all properties
# as a bonus with some error bar too
ptable1 = CF.update_ptable()
## PLOT 1
CF1 = CrossFiltDFs()
codes = ['DMol3','VASP']
code = Select(title='Code 1', value='VASP', options=codes)
code.on_change('value', lambda attr, old, new: CF1.update_code())
exchanges = ['LDA','PBE']
exchange = Select(title='ExchangeCorrelation 1', value='PBE', options=exchanges)
exchange.on_change('value', lambda attr, old, new: CF1.update_exchange())
structures = ['fcc','bcc','hcp']
struct = Select(title='Structure 1', value='fcc', options=structures)
struct.on_change('value', lambda attr, old, new: CF1.update_struct())
_elements = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element = Select(title='Metals 1', value='Pt', options=_elements)
element.on_change('value', lambda attr, old, new: CF1.update_element())
properties = ['B','dB','V0','E0']
prop = Select(title='Properties 1', value='E0', options=properties)
prop.on_change('value', lambda attr, old, new: CF1.update_prop())
#range_slider_lowK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_medK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_highK1 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
apply_crossfilter = Button(label='Values vs. Kpoints')
apply_crossfilter.on_click(CF1.plot_prec_value1)
apply_precision = Button(label='Inter-Property Precision')
apply_precision.on_click(CF1.multi_precisions_correlate1)
clean_crossfilter = Button(label='Clear')
clean_crossfilter.on_click(CF1.clear_crossfilter1)
CF1.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'E0'}
## PLOT 2
CF2 = CrossFiltDFs()
codes2 = ['DMol3','VASP']
code2 = Select(title='Code 2', value='VASP', options=codes2)
code2.on_change('value', lambda attr, old, new: CF2.update_code())
exchanges2 = ['LDA','PBE']
exchange2 = Select(title='ExchangeCorrelation 2', value='PBE', options=exchanges2)
exchange2.on_change('value', lambda attr, old, new: CF2.update_exchange())
structures2 = ['fcc','bcc','hcp']
struct2 = Select(title='Structure 2', value='fcc', options=structures2)
struct2.on_change('value', lambda attr, old, new: CF2.update_struct())
_elements2 = ['Al','Au','Sc', 'Ti','V','Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Rb', 'Sr','Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag','Cd',
'Cs','Ba','Hf','Ta','W','Re','Os','Ir','Pt','Hg']
element2 = Select(title='Metals 2', value='Pt', options=_elements2)
element2.on_change('value', lambda attr, old, new: CF2.update_element())
properties2 = ['B','dB','V0','E0']
prop2 = Select(title='Properties 2', value='V0', options=properties2)
prop2.on_change('value', lambda attr, old, new: CF2.update_prop())
#range_slider_lowK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Low K-point")
#range_slider_medK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="Medium K-point")
#range_slider_highK2 = RangeSlider(start=-5, end=5, value=(-5,5), step=1, title="High K-point")
apply_crossfilter2 = Button(label='Values vs. Kpoints')
apply_crossfilter2.on_click(CF2.plot_prec_value2)
clean_crossfilter2 = Button(label='Clear')
clean_crossfilter2.on_click(CF2.clear_crossfilter2)
apply_precision2 = Button(label='Inter-Property Precision')
apply_precision2.on_click(CF2.multi_precisions_correlate2)
CF2.query_dict={'code':'VASP','exchange':'PBE',\
'structure':'fcc','element':'Pt','properties':'V0'}
CF1.create_figure_new()
CF2.create_figure_new()
controls1 = widgetbox([code, exchange, struct, element, prop, apply_crossfilter, apply_precision, clean_crossfilter],width=400)
#range_slider_lowK1, range_slider_medK1, range_slider_highK1], width=300)
controls2 = widgetbox([code2, exchange2, struct2, element2, prop2,apply_crossfilter2, apply_precision2, clean_crossfilter2],width=400)
#range_slider_lowK2, range_slider_medK2, range_slider_highK2, width=300)
#layout_doc = column(description1, ptable1, description2)
layout_doc = layout([description1],[ptable1],[description2],[controls1, controls2], [CF1.p, CF2.p])
#layout_doc = layout([description1],\
# [ptable1],\
# [description2],\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# row([element,code,exchange,struct]),\
# sizing_mode='stretch_both'
# )
#column(description1, ptable1, description2, controls1, ptable2, controls2)
print ('executed till here')
curdoc().add_root(layout_doc)
curdoc().title = "DFT Benchmark"
CF1.plot_prec_value1()
CF2.plot_prec_value2()
|
joshgabriel/dft-crossfilter
|
CompleteApp/crossfilter_prec_app/main.py
|
Python
|
mit
| 28,527
|
[
"DMol3",
"VASP"
] |
4e7fa96f435c4f59f352f40bae6e4bf8ca34d207943171fd7e418850f8ecdfe8
|
"""Convolutional/Variational autoencoder, including demonstration of
training such a network on MNIST, CelebNet and the film, "Sita Sings The Blues"
using an image pipeline.
Copyright Parag K. Mital, January 2016
"""
import tensorflow as tf
import numpy as np
import os
from libs.dataset_utils import create_input_pipeline
from libs.datasets import CELEB, MNIST
from libs.batch_norm import batch_norm
from libs import utils
def VAE(input_shape=[None, 784],
n_filters=[64, 64, 64],
filter_sizes=[4, 4, 4],
n_hidden=32,
n_code=2,
activation=tf.nn.tanh,
dropout=False,
denoising=False,
convolutional=False,
variational=False):
"""(Variational) (Convolutional) (Denoising) Autoencoder.
Uses tied weights.
Parameters
----------
input_shape : list, optional
Shape of the input to the network. e.g. for MNIST: [None, 784].
n_filters : list, optional
Number of filters for each layer.
If convolutional=True, this refers to the total number of output
filters to create for each layer, with each layer's number of output
filters as a list.
If convolutional=False, then this refers to the total number of neurons
for each layer in a fully connected network.
filter_sizes : list, optional
Only applied when convolutional=True. This refers to the ksize (height
and width) of each convolutional layer.
n_hidden : int, optional
Only applied when variational=True. This refers to the first fully
connected layer prior to the variational embedding, directly after
the encoding. After the variational embedding, another fully connected
layer is created with the same size prior to decoding. Set to 0 to
not use an additional hidden layer.
n_code : int, optional
Only applied when variational=True. This refers to the number of
latent Gaussians to sample for creating the inner most encoding.
activation : function, optional
Activation function to apply to each layer, e.g. tf.nn.relu
dropout : bool, optional
Whether or not to apply dropout. If using dropout, you must feed a
value for 'keep_prob', as returned in the dictionary. 1.0 means no
dropout is used. 0.0 means every connection is dropped. Sensible
values are between 0.5-0.8.
denoising : bool, optional
Whether or not to apply denoising. If using denoising, you must feed a
value for 'corrupt_prob', as returned in the dictionary. 1.0 means no
corruption is used. 0.0 means every feature is corrupted. Sensible
values are between 0.5-0.8.
convolutional : bool, optional
Whether or not to use a convolutional network or else a fully connected
network will be created. This effects the n_filters parameter's
meaning.
variational : bool, optional
Whether or not to create a variational embedding layer. This will
create a fully connected layer after the encoding, if `n_hidden` is
greater than 0, then will create a multivariate gaussian sampling
layer, then another fully connected layer. The size of the fully
connected layers are determined by `n_hidden`, and the size of the
sampling layer is determined by `n_code`.
Returns
-------
model : dict
{
'cost': Tensor to optimize.
'Ws': All weights of the encoder.
'x': Input Placeholder
'z': Inner most encoding Tensor (latent features)
'y': Reconstruction of the Decoder
'keep_prob': Amount to keep when using Dropout
'corrupt_prob': Amount to corrupt when using Denoising
'train': Set to True when training/Applies to Batch Normalization.
}
"""
# network input / placeholders for train (bn) and dropout
x = tf.placeholder(tf.float32, input_shape, 'x')
phase_train = tf.placeholder(tf.bool, name='phase_train')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
corrupt_prob = tf.placeholder(tf.float32, [1])
    # apply input corruption when denoising; the reconstruction target below
    # remains the uncorrupted x
    x_corrupt = utils.corrupt(x) * corrupt_prob + x * (1 - corrupt_prob) \
        if denoising else x
    # 2d -> 4d if convolution
    x_tensor = utils.to_tensor(x_corrupt) if convolutional else x_corrupt
    current_input = x_tensor
Ws = []
shapes = []
# Build the encoder
for layer_i, n_output in enumerate(n_filters):
with tf.variable_scope('encoder/{}'.format(layer_i)):
shapes.append(current_input.get_shape().as_list())
if convolutional:
h, W = utils.conv2d(x=current_input,
n_output=n_output,
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
Ws.append(W)
current_input = h
shapes.append(current_input.get_shape().as_list())
with tf.variable_scope('variational'):
if variational:
dims = current_input.get_shape().as_list()
flattened = utils.flatten(current_input)
if n_hidden:
h = utils.linear(flattened, n_hidden, name='W_fc')[0]
h = activation(batch_norm(h, phase_train, 'fc/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = flattened
z_mu = utils.linear(h, n_code, name='mu')[0]
z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]
            # Sample from noise distribution p(eps) ~ N(0, 1)
            epsilon = tf.random_normal(
                tf.stack([tf.shape(x)[0], n_code]))
            # Reparameterization trick: z = mu + sigma * eps keeps the
            # sampling step differentiable w.r.t. mu and log_sigma.
            z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))
if n_hidden:
h = utils.linear(z, n_hidden, name='fc_t')[0]
h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = z
size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
h = utils.linear(h, size, name='fc_t2')[0]
current_input = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
if dropout:
current_input = tf.nn.dropout(current_input, keep_prob)
if convolutional:
                current_input = tf.reshape(
                    current_input, tf.stack([
                        tf.shape(current_input)[0],
                        dims[1],
                        dims[2],
                        dims[3]]))
else:
z = current_input
    shapes.reverse()
    Ws.reverse()
    # Work on a reversed copy so the caller's list (and the mutable default
    # argument) is not modified in place across calls.
    n_filters = list(reversed(n_filters)) + [input_shape[-1]]
# %%
# Decoding layers
for layer_i, n_output in enumerate(n_filters[1:]):
with tf.variable_scope('decoder/{}'.format(layer_i)):
shape = shapes[layer_i + 1]
if convolutional:
h, W = utils.deconv2d(x=current_input,
n_output_h=shape[1],
n_output_w=shape[2],
n_output_ch=shape[3],
n_input_ch=shapes[layer_i][3],
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
current_input = h
y = current_input
x_flat = utils.flatten(x)
y_flat = utils.flatten(y)
# l2 loss
loss_x = tf.reduce_sum(tf.squared_difference(x_flat, y_flat), 1)
if variational:
# variational lower bound, kl-divergence
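        # Closed form of KL(N(mu, sigma) || N(0, 1)) per example:
        #   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        # where z_log_sigma plays the role of log(sigma).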
loss_z = -0.5 * tf.reduce_sum(
1.0 + 2.0 * z_log_sigma -
tf.square(z_mu) - tf.exp(2.0 * z_log_sigma), 1)
        # total cost: reconstruction (l2) loss plus KL term
cost = tf.reduce_mean(loss_x + loss_z)
else:
# just optimize l2 loss
cost = tf.reduce_mean(loss_x)
return {'cost': cost, 'Ws': Ws,
'x': x, 'z': z, 'y': y,
'keep_prob': keep_prob,
'corrupt_prob': corrupt_prob,
'train': phase_train}
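# %%
def _example_vae_step():
    """Illustrative sketch only; not called anywhere in this module.

    Shows how the dictionary returned by `VAE` is fed for a single training
    step, including the `corrupt_prob` value that must be supplied when
    denoising=True. The random batch and hyperparameters are placeholder
    assumptions, not values taken from the original training runs.
    """
    ae = VAE(input_shape=[None, 784], n_filters=[256, 128], n_hidden=64,
             n_code=2, convolutional=False, variational=True, denoising=True)
    optimizer = tf.train.AdamOptimizer(
        learning_rate=1e-3).minimize(ae['cost'])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Stand-in for a real batch of flattened 28x28 images in [0, 1].
        batch_xs = np.random.rand(50, 784).astype(np.float32)
        cost = sess.run([ae['cost'], optimizer], feed_dict={
            ae['x']: batch_xs,
            ae['train']: True,              # batch norm in training mode
            ae['keep_prob']: 1.0,           # no dropout for this sketch
            ae['corrupt_prob']: [1.0]})[0]  # fully corrupt input (denoising)
        print('example cost:', cost)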
def train_vae(files,
input_shape,
learning_rate=0.0001,
batch_size=100,
n_epochs=50,
n_examples=10,
crop_shape=[64, 64, 3],
crop_factor=0.8,
n_filters=[100, 100, 100, 100],
n_hidden=256,
n_code=50,
convolutional=True,
variational=True,
filter_sizes=[3, 3, 3, 3],
dropout=True,
keep_prob=0.8,
activation=tf.nn.relu,
img_step=100,
save_step=100,
ckpt_name="vae.ckpt"):
"""General purpose training of a (Variational) (Convolutional) Autoencoder.
Supply a list of file paths to images, and this will do everything else.
Parameters
----------
files : list of strings
List of paths to images.
input_shape : list
Must define what the input image's shape is.
learning_rate : float, optional
Learning rate.
batch_size : int, optional
Batch size.
n_epochs : int, optional
Number of epochs.
    n_examples : int, optional
        Number of examples to use when demonstrating the current training
iteration's reconstruction. Creates a square montage, so make
sure int(sqrt(n_examples))**2 = n_examples, e.g. 16, 25, 36, ... 100.
crop_shape : list, optional
Size to centrally crop the image to.
crop_factor : float, optional
Resize factor to apply before cropping.
n_filters : list, optional
Same as VAE's n_filters.
n_hidden : int, optional
Same as VAE's n_hidden.
n_code : int, optional
Same as VAE's n_code.
convolutional : bool, optional
Use convolution or not.
variational : bool, optional
Use variational layer or not.
filter_sizes : list, optional
Same as VAE's filter_sizes.
    dropout : bool, optional
        Use dropout or not.
    keep_prob : float, optional
        Keep probability for dropout.
activation : function, optional
Which activation function to use.
img_step : int, optional
How often to save training images showing the manifold and
reconstruction.
save_step : int, optional
How often to save checkpoints.
ckpt_name : str, optional
Checkpoints will be named as this, e.g. 'model.ckpt'
"""
batch = create_input_pipeline(
files=files,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape)
ae = VAE(input_shape=[None] + crop_shape,
convolutional=convolutional,
variational=variational,
n_filters=n_filters,
n_hidden=n_hidden,
n_code=n_code,
dropout=dropout,
filter_sizes=filter_sizes,
activation=activation)
    # Create a manifold of our innermost layer to show
# example reconstructions. This is one way to see
# what the "embedding" or "latent space" of the encoder
# is capable of encoding, though note that this is just
# a random hyperplane within the latent space, and does not
# encompass all possible embeddings.
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
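    # `zs` holds four corner codes; `make_latent_manifold` (from libs.utils)
    # is assumed to interpolate between them into a grid of n_examples latent
    # vectors, which are decoded below to visualize the learned manifold.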
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# This will handle our threaded image pipeline
coord = tf.train.Coordinator()
# Ensure no more changes to graph
tf.get_default_graph().finalize()
# Start up the queues for handling the image pipeline
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if os.path.exists(ckpt_name + '.index') or os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
# Fit all training data
t_i = 0
batch_i = 0
epoch_i = 0
cost = 0
n_files = len(files)
test_xs = sess.run(batch) / 255.0
utils.montage(test_xs, 'test_xs.png')
try:
while not coord.should_stop() and epoch_i < n_epochs:
batch_i += 1
batch_xs = sess.run(batch) / 255.0
train_cost = sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True,
ae['keep_prob']: keep_prob})[0]
print(batch_i, train_cost)
cost += train_cost
            # One epoch is roughly n_files / batch_size batches.
            if batch_i % max(1, n_files // batch_size) == 0:
print('epoch:', epoch_i)
print('average cost:', cost / batch_i)
cost = 0
batch_i = 0
epoch_i += 1
if batch_i % img_step == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
utils.montage(recon.reshape([-1] + crop_shape),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
print('reconstruction (min, max, mean):',
recon.min(), recon.max(), recon.mean())
utils.montage(recon.reshape([-1] + crop_shape),
'reconstruction_%08d.png' % t_i)
t_i += 1
if batch_i % save_step == 0:
# Save the variables to disk.
saver.save(sess, "./" + ckpt_name,
global_step=batch_i,
write_meta_graph=False)
except tf.errors.OutOfRangeError:
print('Done.')
finally:
# One of the threads has issued an exception. So let's tell all the
# threads to shutdown.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
# %%
def test_mnist(n_epochs=10):
"""Train an autoencoder on MNIST.
This function will train an autoencoder on MNIST and also
save many image files during the training process, demonstrating
    the latent space of the innermost dimension of the encoder,
as well as reconstructions of the decoder.
"""
# load MNIST
n_code = 2
mnist = MNIST(split=[0.8, 0.1, 0.1])
ae = VAE(input_shape=[None, 784], n_filters=[512, 256],
n_hidden=64, n_code=n_code, activation=tf.nn.sigmoid,
convolutional=False, variational=True)
n_examples = 100
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
learning_rate = 0.02
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Fit all training data
t_i = 0
batch_i = 0
batch_size = 200
test_xs = mnist.test.images[:n_examples]
utils.montage(test_xs.reshape((-1, 28, 28)), 'test_xs.png')
for epoch_i in range(n_epochs):
train_i = 0
train_cost = 0
for batch_xs, _ in mnist.train.next_batch(batch_size):
train_cost += sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True, ae['keep_prob']: 1.0})[0]
train_i += 1
if batch_i % 10 == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
m = utils.montage(recon.reshape((-1, 28, 28)),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
m = utils.montage(recon.reshape(
(-1, 28, 28)), 'reconstruction_%08d.png' % t_i)
t_i += 1
batch_i += 1
valid_i = 0
valid_cost = 0
for batch_xs, _ in mnist.valid.next_batch(batch_size):
valid_cost += sess.run([ae['cost']], feed_dict={
ae['x']: batch_xs, ae['train']: False, ae['keep_prob']: 1.0})[0]
valid_i += 1
print('train:', train_cost / train_i, 'valid:', valid_cost / valid_i)
def test_celeb(n_epochs=50):
"""Train an autoencoder on Celeb Net.
"""
files = CELEB()
train_vae(
files=files,
input_shape=[218, 178, 3],
batch_size=100,
n_epochs=n_epochs,
crop_shape=[64, 64, 3],
crop_factor=0.8,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./celeb.ckpt')
def test_sita():
"""Train an autoencoder on Sita Sings The Blues.
"""
if not os.path.exists('sita'):
os.system('wget http://ossguy.com/sita/Sita_Sings_the_Blues_640x360_XviD.avi')
os.mkdir('sita')
os.system('ffmpeg -i Sita_Sings_the_Blues_640x360_XviD.avi -r 60 -f' +
' image2 -s 160x90 sita/sita-%08d.jpg')
files = [os.path.join('sita', f) for f in os.listdir('sita')]
train_vae(
files=files,
input_shape=[90, 160, 3],
batch_size=100,
n_epochs=50,
crop_shape=[90, 160, 3],
crop_factor=1.0,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./sita.ckpt')
if __name__ == '__main__':
test_celeb()
| goddoe/super_resolution | src/libs/vae.py | Python | mit | 19,291 | ["Gaussian"] | 3c8de51e9a722135df3920c33d7e65e4b9f0c4b6ae30fbb514d18fe70ea4ca01 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import unittest
import functools
import itertools
import types
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats
from skbio import Sequence, DNA, RNA, Protein, TabularMSA
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
from skbio.util._testing import ReallyEqualMixin
from skbio.metadata._testing import (MetadataMixinTests,
PositionalMetadataMixinTests)
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_index_equal
class TabularMSASubclass(TabularMSA):
"""Used for testing purposes."""
pass
class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
MetadataMixinTests):
def setUp(self):
self._metadata_constructor_ = functools.partial(TabularMSA, [])
class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
PositionalMetadataMixinTests):
def setUp(self):
def factory(axis_len, positional_metadata=None):
return TabularMSA([DNA('A' * axis_len)],
positional_metadata=positional_metadata)
self._positional_metadata_constructor_ = factory
class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
def test_from_dict_empty(self):
self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
def test_from_dict_single_sequence(self):
self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
TabularMSA([DNA('ACGT')], index=['foo']))
def test_from_dict_multiple_sequences(self):
msa = TabularMSA.from_dict(
{1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
# Sort because order is arbitrary.
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
def test_from_dict_invalid_input(self):
# Basic test to make sure error-checking in the TabularMSA constructor
# is being invoked.
with self.assertRaisesRegex(
ValueError, r'must match the number of positions'):
TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
def test_constructor_invalid_dtype(self):
with self.assertRaisesRegex(TypeError, r'GrammaredSequence.*Sequence'):
TabularMSA([Sequence('')])
with self.assertRaisesRegex(TypeError, r'GrammaredSequence.*int'):
TabularMSA([42, DNA('')])
def test_constructor_not_monomorphic(self):
with self.assertRaisesRegex(TypeError,
r'matching type.*RNA.*DNA'):
TabularMSA([DNA(''), RNA('')])
with self.assertRaisesRegex(TypeError,
r'matching type.*float.*Protein'):
TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
def test_constructor_unequal_length(self):
with self.assertRaisesRegex(
ValueError,
r'must match the number of positions.*1 != 0'):
TabularMSA([Protein(''), Protein('P')])
with self.assertRaisesRegex(
ValueError,
r'must match the number of positions.*1 != 3'):
TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
def test_constructor_non_iterable(self):
with self.assertRaises(TypeError):
TabularMSA(42)
def test_constructor_minter_and_index_both_provided(self):
with self.assertRaisesRegex(ValueError, r'both.*minter.*index'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
index=['a', 'b'])
def test_constructor_invalid_minter_callable(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
def test_constructor_missing_minter_metadata_key(self):
with self.assertRaises(KeyError):
TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
minter='foo')
def test_constructor_unhashable_minter_metadata_key(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
def test_constructor_index_length_mismatch_iterable(self):
with self.assertRaisesRegex(ValueError,
r'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
def test_constructor_index_length_mismatch_index_object(self):
with self.assertRaisesRegex(ValueError,
r'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
def test_constructor_invalid_index_scalar(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=42)
def test_constructor_non_unique_labels(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
assert_index_equal(msa.index, pd.Int64Index([1, 1]))
def test_constructor_empty_no_index(self):
# sequence empty
msa = TabularMSA([])
self.assertIsNone(msa.dtype)
self.assertEqual(msa.shape, (0, 0))
assert_index_equal(msa.index, pd.RangeIndex(0))
with self.assertRaises(StopIteration):
next(iter(msa))
# position empty
seqs = [DNA(''), DNA('')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (2, 0))
assert_index_equal(msa.index, pd.RangeIndex(2))
self.assertEqual(list(msa), seqs)
def test_constructor_empty_with_labels(self):
# sequence empty
msa = TabularMSA([], minter=lambda x: x)
assert_index_equal(msa.index, pd.Index([]))
msa = TabularMSA([], index=iter([]))
assert_index_equal(msa.index, pd.Index([]))
# position empty
msa = TabularMSA([DNA('', metadata={'id': 42}),
DNA('', metadata={'id': 43})], minter='id')
assert_index_equal(msa.index, pd.Index([42, 43]))
msa = TabularMSA([DNA(''), DNA('')], index=iter([42, 43]))
assert_index_equal(msa.index, pd.Index([42, 43]))
def test_constructor_non_empty_no_labels_provided(self):
# 1x3
seqs = [DNA('ACG')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (1, 3))
assert_index_equal(msa.index, pd.RangeIndex(1))
self.assertEqual(list(msa), seqs)
# 3x1
seqs = [DNA('A'), DNA('C'), DNA('G')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 1))
assert_index_equal(msa.index, pd.RangeIndex(3))
self.assertEqual(list(msa), seqs)
def test_constructor_non_empty_with_labels_provided(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(seqs, minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
msa = TabularMSA(seqs, index=iter([42, 43, 44]))
assert_index_equal(msa.index, pd.Index([42, 43, 44]))
def test_constructor_works_with_iterator(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(iter(seqs), minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
def test_constructor_with_multiindex_index(self):
msa = TabularMSA([DNA('AA'), DNA('GG')],
index=[('foo', 42), ('bar', 43)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_constructor_with_multiindex_minter(self):
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa = TabularMSA([DNA('AC'), DNA('GG')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_copy_constructor_respects_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----'), DNA('AAAA')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(msa.index, pd.RangeIndex(3))
assert_index_equal(copy.index, pd.RangeIndex(3))
def test_copy_constructor_without_metadata(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(copy.index, pd.RangeIndex(2))
def test_copy_constructor_with_metadata(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
self.assertIsNot(msa.metadata, copy.metadata)
self.assertIsNot(msa.positional_metadata, copy.positional_metadata)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, copy.index)
def test_copy_constructor_state_override_with_minter(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str)
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str))
def test_copy_constructor_state_override_with_index(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b'])
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b']))
def test_copy_constructor_with_minter_and_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')], index=['idx1', 'idx2'])
with self.assertRaisesRegex(ValueError, r'both.*minter.*index'):
TabularMSA(msa, index=['a', 'b'], minter=str)
def test_dtype(self):
self.assertIsNone(TabularMSA([]).dtype)
self.assertIs(TabularMSA([Protein('')]).dtype, Protein)
with self.assertRaises(AttributeError):
TabularMSA([]).dtype = DNA
with self.assertRaises(AttributeError):
del TabularMSA([]).dtype
def test_shape(self):
shape = TabularMSA([DNA('ACG'), DNA('GCA')]).shape
self.assertEqual(shape, (2, 3))
self.assertEqual(shape.sequence, shape[0])
self.assertEqual(shape.position, shape[1])
with self.assertRaises(TypeError):
shape[0] = 3
with self.assertRaises(AttributeError):
TabularMSA([]).shape = (3, 3)
with self.assertRaises(AttributeError):
del TabularMSA([]).shape
def test_index_getter_default_index(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
assert_index_equal(msa.index, pd.RangeIndex(3))
# immutable
with self.assertRaises(TypeError):
msa.index[1] = 2
# original state is maintained
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_index_getter(self):
index = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')], minter=str).index
self.assertIsInstance(index, pd.Index)
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
# immutable
with self.assertRaises(TypeError):
index[1] = 'AA'
# original state is maintained
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
def test_index_mixed_type(self):
msa = TabularMSA([DNA('AC'), DNA('CA'), DNA('AA')],
index=['abc', 'd', 42])
assert_index_equal(msa.index, pd.Index(['abc', 'd', 42]))
def test_index_setter_empty(self):
msa = TabularMSA([])
msa.index = iter([])
assert_index_equal(msa.index, pd.Index([]))
def test_index_setter_non_empty(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
msa.index = range(3)
assert_index_equal(msa.index, pd.RangeIndex(3))
msa.index = range(3, 6)
assert_index_equal(msa.index, pd.RangeIndex(3, 6))
def test_index_setter_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
index = pd.Index(['ACGT', 'TGCA'])
assert_index_equal(msa.index, index)
with self.assertRaisesRegex(ValueError, r'Length mismatch.*2.*3'):
msa.index = iter(['ab', 'cd', 'ef'])
# original state is maintained
assert_index_equal(msa.index, index)
def test_index_setter_non_unique_index(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
msa.index = ['1', '1']
self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')],
index=['1', '1']))
def test_index_setter_tuples(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')])
msa.index = [('foo', 42), ('bar', 43)]
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43)], tupleize_cols=True))
def test_index_setter_preserves_range_index(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
msa.index = pd.RangeIndex(2)
self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_index_deleter(self):
msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
assert_index_equal(msa.index, pd.Index(['UUU', 'AAA']))
del msa.index
assert_index_equal(msa.index, pd.RangeIndex(2))
# Delete again.
del msa.index
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_bool(self):
self.assertFalse(TabularMSA([]))
self.assertFalse(TabularMSA([RNA('')]))
self.assertFalse(
TabularMSA([RNA('', metadata={'id': 1}),
RNA('', metadata={'id': 2})], minter='id'))
self.assertTrue(TabularMSA([RNA('U')]))
self.assertTrue(TabularMSA([RNA('--'), RNA('..')]))
self.assertTrue(TabularMSA([RNA('AUC'), RNA('GCA')]))
def test_len(self):
self.assertEqual(len(TabularMSA([])), 0)
self.assertEqual(len(TabularMSA([DNA('')])), 1)
self.assertEqual(len(TabularMSA([DNA('AT'), DNA('AG'), DNA('AT')])), 3)
def test_iter(self):
with self.assertRaises(StopIteration):
next(iter(TabularMSA([])))
seqs = [DNA(''), DNA('')]
self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
seqs = [DNA('AAA'), DNA('GCT')]
self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
def test_reversed(self):
with self.assertRaises(StopIteration):
next(reversed(TabularMSA([])))
seqs = [DNA(''), DNA('', metadata={'id': 42})]
self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
seqs = [DNA('AAA'), DNA('GCT')]
self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
def test_eq_and_ne(self):
# Each element contains the components necessary to construct a
# TabularMSA object: seqs and kwargs. None of these objects (once
# constructed) should compare equal to one another.
components = [
# empties
([], {}),
([RNA('')], {}),
([RNA('')], {'minter': str}),
# 1x1
([RNA('U')], {'minter': str}),
# 2x3
([RNA('AUG'), RNA('GUA')], {'minter': str}),
([RNA('AG'), RNA('GG')], {}),
# has labels
([RNA('AG'), RNA('GG')], {'minter': str}),
# different dtype
([DNA('AG'), DNA('GG')], {'minter': str}),
# different labels
([RNA('AG'), RNA('GG')], {'minter': lambda x: str(x) + '42'}),
# different sequence metadata
([RNA('AG', metadata={'id': 42}), RNA('GG')], {'minter': str}),
# different sequence data, same labels
([RNA('AG'), RNA('GA')],
{'minter': lambda x: 'AG' if 'AG' in x else 'GG'}),
# different MSA metadata
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42}}),
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 43}}),
([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42, 'bar': 43}}),
# different MSA positional metadata
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [42, 43]}}),
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [43, 44]}}),
([RNA('AG'), RNA('GG')],
{'positional_metadata': {'foo': [42, 43], 'bar': [43, 44]}}),
]
for seqs, kwargs in components:
obj = TabularMSA(seqs, **kwargs)
self.assertReallyEqual(obj, obj)
self.assertReallyEqual(obj, TabularMSA(seqs, **kwargs))
self.assertReallyEqual(obj, TabularMSASubclass(seqs, **kwargs))
for (seqs1, kwargs1), (seqs2, kwargs2) in \
itertools.combinations(components, 2):
obj1 = TabularMSA(seqs1, **kwargs1)
obj2 = TabularMSA(seqs2, **kwargs2)
self.assertReallyNotEqual(obj1, obj2)
self.assertReallyNotEqual(obj1,
TabularMSASubclass(seqs2, **kwargs2))
# completely different types
msa = TabularMSA([])
self.assertReallyNotEqual(msa, 42)
self.assertReallyNotEqual(msa, [])
self.assertReallyNotEqual(msa, {})
self.assertReallyNotEqual(msa, '')
def test_eq_constructed_from_different_iterables_compare_equal(self):
msa1 = TabularMSA([DNA('ACGT')])
msa2 = TabularMSA((DNA('ACGT'),))
self.assertReallyEqual(msa1, msa2)
def test_eq_ignores_minter_str_and_lambda(self):
# as long as the labels generated by the minters are the same, it
# doesn't matter whether the minters are equal.
msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})],
minter=lambda x: x.metadata['id'])
self.assertReallyEqual(msa1, msa2)
def test_eq_minter_and_index(self):
        # as long as the resulting labels are the same, it doesn't matter
        # whether they came from a minter or an explicit index.
msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], index=['a'])
msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
self.assertReallyEqual(msa1, msa2)
def test_eq_default_index_and_equivalent_provided_index(self):
msa1 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')])
msa2 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')],
index=[0, 1, 2])
self.assertReallyEqual(msa1, msa2)
assert_index_equal(msa1.index, pd.RangeIndex(3))
assert_index_equal(msa2.index, pd.Int64Index([0, 1, 2]))
def test_reassign_index_empty(self):
# sequence empty
msa = TabularMSA([])
msa.reassign_index()
self.assertEqual(msa, TabularMSA([]))
assert_index_equal(msa.index, pd.RangeIndex(0))
msa.reassign_index(minter=str)
self.assertEqual(msa, TabularMSA([], minter=str))
assert_index_equal(msa.index, pd.Index([]))
# position empty
msa = TabularMSA([DNA('')])
msa.reassign_index()
self.assertEqual(msa, TabularMSA([DNA('')]))
assert_index_equal(msa.index, pd.RangeIndex(1))
msa.reassign_index(minter=str)
self.assertEqual(msa, TabularMSA([DNA('')], minter=str))
assert_index_equal(msa.index, pd.Index(['']))
def test_reassign_index_non_empty(self):
msa = TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], minter=str)
assert_index_equal(msa.index, pd.Index(['ACG', 'AAA']))
msa.reassign_index(minter='id')
self.assertEqual(
msa,
TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], minter='id'))
assert_index_equal(msa.index, pd.Index([1, 2]))
msa.reassign_index(mapping={1: 5})
self.assertEqual(
msa,
TabularMSA([DNA('ACG', metadata={'id': 1}),
DNA('AAA', metadata={'id': 2})], index=[5, 2]))
assert_index_equal(msa.index, pd.Index([5, 2]))
msa.reassign_index()
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_reassign_index_minter_and_mapping_both_provided(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
with self.assertRaisesRegex(ValueError, r'both.*mapping.*minter.*'):
msa.reassign_index(minter=str, mapping={"ACGT": "fleventy"})
# original state is maintained
assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
def test_reassign_index_mapping_invalid_type(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
with self.assertRaisesRegex(TypeError,
r'mapping.*dict.*callable.*list'):
msa.reassign_index(mapping=['abc', 'def'])
# original state is maintained
assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
def test_reassign_index_with_mapping_dict_empty(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping={})
self.assertEqual(msa, TabularMSA(seqs, index=[0.5, 1.5, 2.5]))
def test_reassign_index_with_mapping_dict_subset(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 2.5: "c"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 1.5, 'c']))
def test_reassign_index_with_mapping_dict_superset(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'b', 'c']))
def test_reassign_index_with_mapping_callable(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
msa = TabularMSA(seqs, index=[0, 1, 2])
msa.reassign_index(mapping=str)
self.assertEqual(msa, TabularMSA(seqs, index=['0', '1', '2']))
msa.reassign_index(mapping=lambda e: int(e) + 42)
self.assertEqual(msa, TabularMSA(seqs, index=[42, 43, 44]))
def test_reassign_index_non_unique_existing_index(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
msa = TabularMSA(seqs, index=[0.5, 0.5, 0.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
def test_reassign_index_non_unique_new_index(self):
seqs = [DNA("A"), DNA("C"), DNA("G")]
mapping = {0.5: "a", 1.5: "a", 2.5: "a"}
msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
msa.reassign_index(mapping=mapping)
self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
def test_reassign_index_to_multiindex_with_minter(self):
msa = TabularMSA([DNA('AC'), DNA('.G')])
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa.reassign_index(minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('.G')],
index=[('foo', 42), ('bar', 43)]))
def test_reassign_index_to_multiindex_with_mapping(self):
msa = TabularMSA([DNA('AC'), DNA('.G')])
mapping = {0: ('foo', 42), 1: ('bar', 43)}
msa.reassign_index(mapping=mapping)
self.assertIsInstance(msa.index, pd.MultiIndex)
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('.G')],
index=[('foo', 42), ('bar', 43)]))
def test_sort_on_unorderable_msa_index(self):
msa = TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
index=[42, 41, 'foo'])
with self.assertRaises(TypeError):
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
index=[42, 41, 'foo']))
def test_sort_empty_on_msa_index(self):
msa = TabularMSA([], index=[])
msa.sort()
self.assertEqual(msa, TabularMSA([], index=[]))
msa = TabularMSA([], index=[])
msa.sort(ascending=False)
self.assertEqual(msa, TabularMSA([], index=[]))
def test_sort_single_sequence_on_msa_index(self):
msa = TabularMSA([DNA('ACGT')], index=[42])
msa.sort()
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
msa = TabularMSA([DNA('ACGT')], index=[42])
msa.sort(ascending=False)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
def test_sort_multiple_sequences_on_msa_index(self):
msa = TabularMSA([
DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
msa.sort(ascending=True)
self.assertEqual(
msa,
TabularMSA([
DNA('GG'), DNA('CC'), DNA('TC')], index=['a', 'b', 'z']))
msa = TabularMSA([
DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([
DNA('TC'), DNA('CC'), DNA('GG')], index=['z', 'b', 'a']))
def test_sort_on_labels_with_some_repeats(self):
msa = TabularMSA([
DNA('TCCG', metadata={'id': 10}),
DNA('TAGG', metadata={'id': 10}),
DNA('GGGG', metadata={'id': 8}),
DNA('TGGG', metadata={'id': 10}),
DNA('ACGT', metadata={'id': 0}),
DNA('TAGA', metadata={'id': 10})], minter='id')
msa.sort()
self.assertEqual(
msa,
TabularMSA([
DNA('ACGT', metadata={'id': 0}),
DNA('GGGG', metadata={'id': 8}),
DNA('TCCG', metadata={'id': 10}),
DNA('TAGG', metadata={'id': 10}),
DNA('TGGG', metadata={'id': 10}),
DNA('TAGA', metadata={'id': 10})], minter='id'))
def test_sort_on_key_with_all_repeats(self):
msa = TabularMSA([
DNA('TTT', metadata={'id': 'a'}),
DNA('TTT', metadata={'id': 'b'}),
DNA('TTT', metadata={'id': 'c'})], minter=str)
msa.sort()
self.assertEqual(
msa,
TabularMSA([
DNA('TTT', metadata={'id': 'a'}),
DNA('TTT', metadata={'id': 'b'}),
DNA('TTT', metadata={'id': 'c'})], minter=str))
def test_sort_default_index(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')]))
def test_sort_default_index_descending(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('CC'), DNA('GG'), DNA('TC')], index=[2, 1, 0]))
def test_sort_already_sorted(self):
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3]))
msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1]))
def test_sort_reverse_sorted(self):
msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[3, 2, 1])
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[1, 2, 3]))
msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[1, 2, 3])
msa.sort(ascending=False)
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[3, 2, 1]))
def test_sort_multiindex(self):
multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
sortedindex = [(1, 'c'), (2, 'a'), (3, 'b')]
msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
msa.sort()
self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
index=sortedindex))
def test_sort_multiindex_with_level(self):
multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
first_sorted = [(1, 'c'), (2, 'a'), (3, 'b')]
second_sorted = [(2, 'a'), (3, 'b'), (1, 'c')]
msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
self.assertIsInstance(msa.index, pd.MultiIndex)
msa.sort(level=0)
self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
index=first_sorted))
msa.sort(level=1)
self.assertEqual(msa, TabularMSA([DNA('A'), DNA('G'), DNA('C')],
index=second_sorted))
def test_to_dict_falsey_msa(self):
self.assertEqual(TabularMSA([]).to_dict(), {})
self.assertEqual(TabularMSA([RNA('')], index=['foo']).to_dict(),
{'foo': RNA('')})
def test_to_dict_non_empty(self):
seqs = [Protein('PAW', metadata={'id': 42}),
Protein('WAP', metadata={'id': -999})]
msa = TabularMSA(seqs, minter='id')
self.assertEqual(msa.to_dict(), {42: seqs[0], -999: seqs[1]})
def test_to_dict_default_index(self):
msa = TabularMSA([RNA('UUA'), RNA('-C-'), RNA('AAA')])
d = msa.to_dict()
self.assertEqual(d, {0: RNA('UUA'), 1: RNA('-C-'), 2: RNA('AAA')})
def test_to_dict_duplicate_labels(self):
msa = TabularMSA([DNA("A"), DNA("G")], index=[0, 0])
with self.assertRaises(ValueError) as cm:
msa.to_dict()
self.assertIn("unique", str(cm.exception))
def test_from_dict_to_dict_roundtrip(self):
d = {}
self.assertEqual(TabularMSA.from_dict(d).to_dict(), d)
# can roundtrip even with mixed key types
d1 = {'a': DNA('CAT'), 42: DNA('TAG')}
d2 = TabularMSA.from_dict(d1).to_dict()
self.assertEqual(d2, d1)
self.assertIs(d1['a'], d2['a'])
self.assertIs(d1[42], d2[42])
class TestContains(unittest.TestCase):
def test_no_sequences(self):
msa = TabularMSA([], index=[])
self.assertFalse('' in msa)
self.assertFalse('foo' in msa)
def test_with_str_labels(self):
msa = TabularMSA([RNA('AU'), RNA('A.')], index=['foo', 'bar'])
self.assertTrue('foo' in msa)
self.assertTrue('bar' in msa)
self.assertFalse('baz' in msa)
self.assertFalse(0 in msa)
def test_with_int_labels(self):
msa = TabularMSA([RNA('AU'), RNA('A.')], index=[42, -1])
self.assertTrue(42 in msa)
self.assertTrue(-1 in msa)
self.assertFalse(0 in msa)
self.assertFalse('foo' in msa)
class TestCopy(unittest.TestCase):
# Note: tests for metadata/positional_metadata are in mixin tests above.
def test_no_sequences(self):
msa = TabularMSA([])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
def test_with_sequences(self):
msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
self.assertIsNot(msa[0], msa_copy[0])
self.assertIsNot(msa[1], msa_copy[1])
msa_copy.append(DNA('AAAA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['bar'] = 42
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['foo'].append(2)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1, 2]}), DNA('TGCA')]))
def test_with_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
msa_copy = copy.copy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, msa_copy.index)
msa_copy.index = [1, 2]
assert_index_equal(msa_copy.index, pd.Index([1, 2]))
assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
class TestDeepCopy(unittest.TestCase):
# Note: tests for metadata/positional_metadata are in mixin tests above.
def test_no_sequences(self):
msa = TabularMSA([])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
def test_with_sequences(self):
msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
self.assertIsNot(msa._seqs, msa_copy._seqs)
self.assertIsNot(msa[0], msa_copy[0])
self.assertIsNot(msa[1], msa_copy[1])
msa_copy.append(DNA('AAAA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['bar'] = 42
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
msa_copy._seqs[0].metadata['foo'].append(2)
self.assertEqual(
msa,
TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
def test_with_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
msa_copy = copy.deepcopy(msa)
self.assertEqual(msa, msa_copy)
self.assertIsNot(msa, msa_copy)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, msa_copy.index)
msa_copy.index = [1, 2]
assert_index_equal(msa_copy.index, pd.Index([1, 2]))
assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
class SharedIndexTests:
def get(self, obj, indexable):
raise NotImplementedError()
def test_tuple_too_big(self):
with self.assertRaises(ValueError):
self.get(TabularMSA([]), (None, None, None))
def test_empty_msa_slice(self):
msa = TabularMSA([])
new = self.get(msa, slice(None, None))
self.assertIsNot(msa, new)
self.assertEqual(msa, new)
def test_msa_slice_all_first_axis(self):
msa = TabularMSA([RNA("AAA", metadata={1: 1}),
RNA("AAU", positional_metadata={0: [1, 2, 3]})],
metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
new_slice = self.get(msa, slice(None))
new_ellipsis = self.get(msa, Ellipsis)
self.assertIsNot(msa, new_slice)
for s1, s2 in zip(msa, new_slice):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_slice)
self.assertIsNot(msa, new_ellipsis)
for s1, s2 in zip(msa, new_ellipsis):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_ellipsis)
def test_msa_slice_all_both_axes(self):
msa = TabularMSA([RNA("AAA", metadata={1: 1}),
RNA("AAU", positional_metadata={0: [1, 2, 3]})],
metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
new_slice = self.get(msa, (slice(None), slice(None)))
new_ellipsis = self.get(msa, (Ellipsis, Ellipsis))
self.assertIsNot(msa, new_slice)
for s1, s2 in zip(msa, new_slice):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_slice)
self.assertIsNot(msa, new_ellipsis)
for s1, s2 in zip(msa, new_ellipsis):
self.assertIsNot(s1, s2)
self.assertEqual(msa, new_ellipsis)
def test_bool_index_first_axis(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, [True, True, False])
self.assertEqual(new, TabularMSA([a, b], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False]))
def test_bool_index_second_axis(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, (Ellipsis, [True, True, False]))
self.assertEqual(new, TabularMSA([a[0, 1], b[0, 1], c[0, 1]],
metadata={0: 'x'},
positional_metadata={0: [1, 2]},
index=[True, False, True]))
def test_bool_index_both_axes(self):
a = DNA("AAA", metadata={1: 1})
b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
c = DNA("AAC")
msa = TabularMSA([a, b, c], metadata={0: 'x'},
positional_metadata={0: [1, 2, 3]},
index=[True, False, True])
new = self.get(msa, ([False, True, True], [True, True, False]))
self.assertEqual(new, TabularMSA([b[0, 1], c[0, 1]],
metadata={0: 'x'},
positional_metadata={0: [1, 2]},
index=[False, True]))
def test_bool_index_too_big(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
index=[False, True, False])
with self.assertRaises(IndexError):
self.get(msa, [False, False, False, False])
with self.assertRaises(IndexError):
self.get(msa, [True, True, True, True])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [True, False, True, False, True]))
with self.assertRaises(IndexError):
self.get(msa, ([True, False, True, False],
[True, False, True, False, False]))
def test_bool_index_too_small(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
index=[False, True, False])
with self.assertRaises(IndexError):
self.get(msa, [False])
with self.assertRaises(IndexError):
self.get(msa, [True])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [True]))
with self.assertRaises(IndexError):
self.get(msa, ([True, False], [True, False, True, False]))
def test_bad_scalar(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises((KeyError, TypeError)):
self.get(msa, "foo")
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, "foo"))
def test_bad_fancy_index(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises((KeyError, TypeError, ValueError)):
self.get(msa, [0, "foo"])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [0, "foo"]))
    def test_absurd_slice(self):
msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
with self.assertRaises(TypeError):
self.get(msa, {set(1): 0})
class SharedPropertyIndexTests(SharedIndexTests):
def setUp(self):
self.combo_msa = TabularMSA([
DNA('ACGTA', metadata={0: 0},
positional_metadata={0: [1, 2, 3, 4, 5]}),
DNA('CGTAC', metadata={1: 1},
positional_metadata={1: [1, 2, 3, 4, 5]}),
DNA('GTACG', metadata={2: 2},
positional_metadata={2: [1, 2, 3, 4, 5]}),
DNA('TACGT', metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4, 5]}),
DNA('ACGTT', metadata={4: 4},
positional_metadata={4: [1, 2, 3, 4, 5]})
], index=list('ABCDE'), metadata={'x': 'x'},
positional_metadata={'y': [5, 4, 3, 2, 1]})
"""First off, sorry to the next person who has to deal with this.
The next few tests will try and slice by a bunch of stuff, with
all combinations. Each element in the two lists is a tuple where
the first element is the thing to slice with, and the second is
the equivalent fancy index which describes the same range.
This lets us describe the results a little more declaratively
without setting up a thousand tests for each possible combination.
This does mean the iloc via a fancy index and simple scalar must
work correctly.
"""
        # This will be overridden for TestLoc because the first axis uses labels
self.combo_first_axis = [
([], []),
(slice(0, 0), []),
(Ellipsis, [0, 1, 2, 3, 4]),
(slice(None), [0, 1, 2, 3, 4]),
(slice(0, 10000), [0, 1, 2, 3, 4]),
(3, 3),
(-4, 1),
([0], [0]),
([2], [2]),
(slice(1, 3), [1, 2]),
(slice(3, 0, -1), [3, 2, 1]),
([-3, 2, 1], [2, 2, 1]),
([-4, -3, -2, -1], [1, 2, 3, 4]),
(np.array([-3, 2, 1]), [2, 2, 1]),
([True, True, False, False, True], [0, 1, 4]),
(np.array([True, True, False, True, False]), [0, 1, 3]),
(range(3), [0, 1, 2]),
([slice(0, 2), slice(3, 4), 4], [0, 1, 3, 4])
]
# Same in both TestLoc and TestILoc
self.combo_second_axis = self.combo_first_axis
def test_combo_single_axis_natural(self):
for idx, exp in self.combo_first_axis:
self.assertEqual(self.get(self.combo_msa, idx),
self.combo_msa.iloc[exp],
msg="%r did not match iloc[%r]" % (idx, exp))
def test_combo_first_axis_only(self):
for idx, exp in self.combo_first_axis:
self.assertEqual(self.get(self.combo_msa, idx, axis=0),
self.combo_msa.iloc[exp, ...],
msg="%r did not match iloc[%r, ...]" % (idx, exp))
def test_combo_second_axis_only(self):
for idx, exp in self.combo_second_axis:
self.assertEqual(self.get(self.combo_msa, idx, axis=1),
self.combo_msa.iloc[..., exp],
msg="%r did not match iloc[..., %r]" % (idx, exp))
def test_combo_both_axes(self):
for idx1, exp1 in self.combo_first_axis:
for idx2, exp2 in self.combo_second_axis:
self.assertEqual(self.get(self.combo_msa, (idx1, idx2)),
self.combo_msa.iloc[exp1, exp2],
msg=("%r did not match iloc[%r, %r]"
% ((idx1, idx2), exp1, exp2)))
class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
def setUp(self):
SharedPropertyIndexTests.setUp(self)
self.combo_first_axis = [
([], []),
(slice('X', "Z"), []),
('A', 0),
('E', 4),
(['B'], [1]),
(np.asarray(['B']), [1]),
(slice('A', 'C', 2), [0, 2]),
(slice('C', 'A', -2), [2, 0]),
(slice('A', 'B'), [0, 1]),
(slice(None), [0, 1, 2, 3, 4]),
(slice('A', None), [0, 1, 2, 3, 4]),
(slice(None, 'C'), [0, 1, 2]),
(Ellipsis, [0, 1, 2, 3, 4]),
(self.combo_msa.index, [0, 1, 2, 3, 4]),
(['B', 'A', 'A', 'C'], [1, 0, 0, 2]),
(np.asarray(['B', 'A', 'A', 'C']), [1, 0, 0, 2]),
([True, False, True, True, False], [0, 2, 3]),
(np.asarray([True, False, True, True, False]), [0, 2, 3]),
]
def test_forced_axis_returns_copy(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertIsNot(msa.loc(axis=1), msa.loc)
def test_forced_axis_no_mutate(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertEqual(msa.loc(axis=1)[0], Sequence("EE"))
self.assertEqual(msa.loc[0], Protein("EVANTHQMVS"))
self.assertIsNone(msa.loc._axis)
def get(self, obj, indexable, axis=None):
if axis is None:
return obj.loc[indexable]
else:
return obj.loc(axis=axis)[indexable]
def test_complex_single_label(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
self.assertIs(a, self.get(msa, (('a', 0),)))
self.assertIs(b, self.get(msa, (('a', 1),)))
self.assertIs(c, self.get(msa, (('b', 0),)))
def test_partial_label(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
exp_a = TabularMSA([a, b], index=[0, 1])
exp_b = TabularMSA([c], index=[0])
self.assertEqual(self.get(msa, 'a'), exp_a)
self.assertEqual(self.get(msa, 'b'), exp_b)
def test_label_not_exists(self):
msa = TabularMSA([DNA("ACG")], index=['foo'])
with self.assertRaises(KeyError):
self.get(msa, 'bar')
def test_duplicate_index_nonscalar_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0, 1, 2])
self.assertEqual(self.get(msa, 0),
TabularMSA([a, b], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0]))
def test_duplicate_index_scalar_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[0, 0, 1, 2])
self.assertEqual(self.get(msa, 1), c)
def test_multiindex_complex(self):
a = DNA("ACG")
b = DNA("ACT")
c = DNA("ACA")
msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
exp = TabularMSA([a, c], index=[('a', 0), ('b', 0)])
self.assertEqual(self.get(msa, [('a', 0), ('b', 0)]), exp)
def test_fancy_index_missing_label(self):
msa = TabularMSA([DNA("ACG")], index=['foo'])
with self.assertRaises(KeyError):
self.get(msa, ['foo', 'bar'])
with self.assertRaises(KeyError):
self.get(msa, ['bar'])
def test_multiindex_fancy_indexing_incomplete_label(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (('a', 'x'), Ellipsis)),
TabularMSA([a, b], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c',
'd']},
index=[0, 1]))
def test_multiindex_complicated_axis(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (([False, True, False, True],
'x', 0), Ellipsis)),
TabularMSA([d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c',
'd']},
index=[('b', 'x', 0)]))
def test_multiindex_complicated_axis_empty_selection(self):
a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'c': ['a', 'b', 'c', 'd']},
index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
('b', 'x', 0)])
self.assertEqual(self.get(msa, (([False, True, False, True],
'x', 2), Ellipsis)),
TabularMSA([], metadata={'x': 'y'},
# TODO: Change for #1198
positional_metadata=None,
index=[]))
def test_bool_index_scalar_bool_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[False, True, False, False])
self.assertEqual(self.get(msa, True), b)
def test_bool_index_nonscalar_bool_label(self):
a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[False, True, False, True])
self.assertEqual(self.get(msa, True),
TabularMSA([b, d], metadata={'x': 'y'},
positional_metadata={'z': [1, 2, 3, 4]},
index=[True, True]))
def test_categorical_index_scalar_label(self):
msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
self.assertEqual(self.get(msa, 'a'), RNA("ACUG"))
def test_categorical_index_nonscalar_label(self):
msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
self.assertEqual(self.get(msa, 'b'),
TabularMSA([RNA("ACUA"), RNA("AAUG")],
index=pd.CategoricalIndex(
['b', 'b'], categories=['a', 'b', 'c'])
))
def test_float_index_out_of_order_slice(self):
msa = TabularMSA([DNA("ACGG"), DNA("AAGC"), DNA("AAAA"), DNA("ACTC")],
index=[0.1, 2.4, 5.1, 2.6])
with self.assertRaises(KeyError):
self.get(msa, slice(0.1, 2.7))
msa.sort()
result = self.get(msa, slice(0.1, 2.7))
self.assertEqual(result, TabularMSA([DNA("ACGG"), DNA("AAGC"),
DNA("ACTC")],
index=[0.1, 2.4, 2.6]))
def test_nonscalar_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaisesRegex(TypeError,
r'tuple.*independent.*MultiIndex'):
self.get(msa, ['a', 'b'])
def test_missing_first_nonscalar_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaises(KeyError):
self.get(msa, ['x', 'a', 'b'])
def test_tuple_fancy_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
with self.assertRaisesRegex(TypeError, r'tuple.*pd.MultiIndex.*label'):
self.get(msa, ((('a', 0, 1), ('b', 0, 1)), Ellipsis))
def test_non_multiindex_tuple(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
with self.assertRaisesRegex(TypeError, r'tuple.*first axis'):
self.get(msa, ((0, 1), Ellipsis))
def test_assertion_exists_for_future_failure_of_get_sequence_loc(self):
# Ideally we wouldn't need this test or the branch, but the most common
# failure for pandas would be returning a series instead of the value.
        # We should make sure that the user gets an error should this ever
# happen again. Getting a series of DNA looks pretty weird...
msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
with self.assertRaises(AssertionError):
msa._get_sequence_loc_([1, 2])
class TestILoc(SharedPropertyIndexTests, unittest.TestCase):
def setUp(self):
SharedPropertyIndexTests.setUp(self)
self.combo_first_axis = self.combo_second_axis
def test_forced_axis_returns_copy(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertIsNot(msa.iloc(axis=1), msa.iloc)
def test_forced_axis_no_mutate(self):
msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
self.assertEqual(msa.iloc(axis=1)[0], Sequence("EE"))
self.assertEqual(msa.iloc[0], Protein("EVANTHQMVS"))
self.assertIsNone(msa.iloc._axis)
def get(self, obj, indexable, axis=None):
if axis is None:
return obj.iloc[indexable]
else:
return obj.iloc(axis=axis)[indexable]
def test_entire_fancy_first_axis(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, np.arange(2))
new_list_simple = self.get(msa, [0, 1])
new_list_backwards = self.get(msa, [-2, -1])
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_entire_second_axis(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, (Ellipsis, np.arange(4)))
new_list_simple = self.get(msa, (Ellipsis, [0, 1, 2, 3]))
new_list_backwards = self.get(msa, (Ellipsis, [-4, -3, -2, -1]))
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_entire_both_axes(self):
msa = TabularMSA([
DNA("ACCA", metadata={'a': 'foo'},
positional_metadata={'a': [7, 6, 5, 4]}),
DNA("GGAA", metadata={'b': 'bar'},
positional_metadata={'b': [3, 4, 5, 6]})
], metadata={'c': 'baz'},
positional_metadata={'foo': [1, 2, 3, 4]})
new_np_simple = self.get(msa, (np.arange(2), np.arange(4)))
new_list_simple = self.get(msa, ([0, 1], [0, 1, 2, 3]))
new_list_backwards = self.get(msa, ([-2, -1], [-4, -3, -2, -1]))
self.assertIsNot(msa, new_np_simple)
self.assertEqual(msa, new_np_simple)
self.assertIsNot(msa, new_list_simple)
self.assertEqual(msa, new_list_simple)
self.assertIsNot(msa, new_list_backwards)
self.assertEqual(msa, new_list_backwards)
def test_fancy_out_of_bound(self):
with self.assertRaises(IndexError):
self.get(TabularMSA([DNA('AC')]), [0, 1, 2])
with self.assertRaises(IndexError):
self.get(TabularMSA([DNA('AC')]), (Ellipsis, [0, 1, 2]))
def test_fancy_empty_both_axis(self):
msa = TabularMSA([DNA("ACGT", metadata={'x': 1}),
DNA("TGCA", metadata={'y': 2})], index=list("AB"))
new_np_simple = self.get(msa, (np.arange(0), np.arange(0)))
new_list_simple = self.get(msa, ([], []))
self.assertEqual(TabularMSA([]), new_np_simple)
self.assertEqual(TabularMSA([]), new_list_simple)
def test_fancy_standard_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, [0, 2]),
TabularMSA([a, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]},
index=[0, 2]))
def test_fancy_standard_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (Ellipsis, [0, 2])),
TabularMSA([a[0, 2], b[0, 2], c[0, 2]],
metadata={3: 3},
positional_metadata={3: [1, 3]},
index=[0, 1, 2]))
def test_fancy_standard_both_axes(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, ([0, 2], [0, 2])),
TabularMSA([a[0, 2], c[0, 2]],
metadata={3: 3},
positional_metadata={3: [1, 3]},
index=[0, 2]))
def test_fancy_empty_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
# TODO: Change for #1198
self.assertEqual(self.get(msa, []),
TabularMSA([], metadata={3: 3}))
def test_fancy_empty_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (Ellipsis, [])),
TabularMSA([a[0:0], b[0:0], c[0:0]],
metadata={3: 3},
positional_metadata={3: np.array(
[], dtype=int)}))
def test_fancy_empty_both_axes(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
# TODO: Change for #1198
self.assertEqual(self.get(msa, ([], [])),
TabularMSA([], metadata={3: 3}))
def test_fancy_out_of_bounds_first_axis(self):
msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
with self.assertRaises(IndexError):
self.get(msa, [10])
with self.assertRaises(IndexError):
self.get(msa, [0, 1, 10])
def test_fancy_out_of_bounds_second_axis(self):
msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [10]))
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, [1, 2, 4]))
def test_get_scalar_first_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GG", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b])
new0 = self.get(msa, 0)
new1 = self.get(msa, 1)
self.assertEqual(new0, a)
self.assertEqual(new1, b)
def test_get_scalar_second_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
new0 = self.get(msa, (Ellipsis, 0))
new1 = self.get(msa, (Ellipsis, 1))
self.assertEqual(new0,
Sequence("AG", metadata={'z': 5},
positional_metadata={'x': [1, np.nan],
'y': [np.nan, 3]}))
self.assertEqual(new1,
Sequence("AC", metadata={'z': 6},
positional_metadata={'x': [2, np.nan],
'y': [np.nan, 4]}))
def test_scalar_sliced_first_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, (1, [1, 3])),
DNA("CT", metadata={1: 1},
positional_metadata={1: [2, 4]}))
def test_scalar_sliced_second_axis(self):
a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
b = DNA("ACGA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
msa = TabularMSA([a, b, c], metadata={3: 3},
positional_metadata={3: [1, 2, 3, 4]})
self.assertEqual(self.get(msa, ([1, 2], 3)),
Sequence("AT", metadata={3: 4},
positional_metadata={1: [4, np.nan],
2: [np.nan, 4]}))
def test_get_scalar_out_of_bound_first_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
with self.assertRaises(IndexError):
self.get(msa, 3)
def test_get_scalar_out_of_bound_second_axis(self):
a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
with self.assertRaises(IndexError):
self.get(msa, (Ellipsis, 3))
class TestGetItem(SharedIndexTests, unittest.TestCase):
def get(self, obj, indexable):
return obj[indexable]
def test_uses_iloc_not_loc(self):
a = DNA("ACGA")
b = DNA("ACGT")
msa = TabularMSA([a, b], index=[1, 0])
self.assertIs(msa[0], a)
self.assertIs(msa[1], b)
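    # Illustrative (added) counterpart to the test above, assuming the same
    # `loc` accessor exercised elsewhere in this module: with index=[1, 0],
    # label-based lookup and positional lookup disagree, which is exactly what
    # `test_uses_iloc_not_loc` relies on.
    def test_loc_label_lookup_differs_here(self):
        a = DNA("ACGA")
        b = DNA("ACGT")
        msa = TabularMSA([a, b], index=[1, 0])
        # Label 0 refers to the *second* sequence, unlike msa[0] above.
        self.assertEqual(msa.loc[0], b)
        self.assertEqual(msa.loc[1], a)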
class TestConstructor(unittest.TestCase):
def setUp(self):
self.seqs = [DNA("ACGT"), DNA("GCTA")]
self.m = {'x': 'y', 0: 1}
self.pm = pd.DataFrame({'foo': [1, 2, 3, 4]})
self.index = pd.Index(['a', 'b'])
self.msa = TabularMSA(self.seqs, metadata=self.m,
positional_metadata=self.pm, index=self.index)
def test_no_override(self):
result = self.msa._constructor_()
self.assertEqual(self.msa, result)
for seq1, seq2 in zip(result, self.msa):
self.assertIsNot(seq1, seq2)
self.assertIsNot(result.metadata, self.msa.metadata)
self.assertIsNot(result.positional_metadata,
self.msa.positional_metadata)
def test_sequence_override_same_seqs(self):
result = self.msa._constructor_(sequences=self.seqs)
self.assertEqual(self.msa, result)
for seq1, seq2 in zip(result, self.msa):
self.assertIsNot(seq1, seq2)
self.assertIsNot(result.metadata, self.msa.metadata)
self.assertIsNot(result.positional_metadata,
self.msa.positional_metadata)
def test_sequence_override(self):
seqs = [RNA("ACGU"), RNA("GCUA")]
result = self.msa._constructor_(sequences=seqs)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
def test_no_override_no_md(self):
msa = TabularMSA(self.seqs, index=self.index)
self.assertEqual(msa, msa._constructor_())
def test_metadata_override(self):
new_md = {'foo': {'x': 0}}
result = self.msa._constructor_(metadata=new_md)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, new_md)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
def test_positional_metadata_override(self):
new_pm = pd.DataFrame({'x': [1, 2, 3, 4]})
result = self.msa._constructor_(positional_metadata=new_pm)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, self.index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, new_pm)
def test_index_override(self):
new_index = pd.Index([('a', 0), ('b', 1)])
result = self.msa._constructor_(index=new_index)
self.assertNotEqual(result, self.msa)
self.assertEqual(list(result), self.seqs)
assert_index_equal(result.index, new_index)
self.assertEqual(result.metadata, self.m)
assert_data_frame_almost_equal(result.positional_metadata, self.pm)
class TestAppend(unittest.TestCase):
# Error cases
def test_invalid_minter_index_reset_index_parameter_combos(self):
msa = TabularMSA([])
param_combos = (
{},
{'minter': str, 'index': 'foo', 'reset_index': True},
{'minter': str, 'index': 'foo'},
{'minter': str, 'reset_index': True},
{'index': 'foo', 'reset_index': True}
)
for params in param_combos:
with self.assertRaisesRegex(ValueError,
r"one of.*minter.*index.*reset_index"):
msa.append(DNA('ACGT'), **params)
self.assertEqual(msa, TabularMSA([]))
def test_invalid_dtype(self):
msa = TabularMSA([])
with self.assertRaisesRegex(TypeError, r'GrammaredSequence.*Sequence'):
msa.append(Sequence(''), reset_index=True)
self.assertEqual(msa, TabularMSA([]))
def test_dtype_mismatch_rna(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, r'matching type.*RNA.*DNA'):
msa.append(RNA('UUUU'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_dtype_mismatch_float(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, r'matching type.*float.*DNA'):
msa.append(42.0, reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(
ValueError, r'must match the number of positions.*5 != 4'):
msa.append(DNA('ACGTA'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_invalid_minter(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(KeyError):
msa.append(DNA('AAAA'), minter='id')
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
# Valid cases: `minter`
def test_minter_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), minter=str)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], minter=str))
def test_minter_metadata_key(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.append(DNA('', metadata={'id': 'c'}), minter='id')
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('', metadata={'id': 'c'})], minter='id'))
def test_minter_callable(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.append(DNA(''), minter=str)
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('')], index=['a', 'b', '']))
def test_multiindex_minter_empty_msa(self):
def multiindex_minter(seq):
return ('foo', 42)
msa = TabularMSA([])
msa.append(DNA('AC'), minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42)]))
def test_multiindex_minter_non_empty_msa(self):
def multiindex_minter(seq):
return ('baz', 44)
msa = TabularMSA([RNA('UU'), RNA('CA')],
index=[('foo', 42), ('bar', 43)])
msa.append(RNA('AC'), minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
# Valid cases: `index`
def test_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), index='a')
self.assertEqual(
msa,
TabularMSA([DNA('ACGT')], index=['a']))
def test_index_non_empty_msa(self):
msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
msa.append(DNA('--'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('GT'), DNA('--')],
index=['a', 'b', 'foo']))
def test_multiindex_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('AA'), index=('foo', 42))
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42)]))
def test_multiindex_index_non_empty_msa(self):
msa = TabularMSA([RNA('A'), RNA('C')],
index=[('foo', 42), ('bar', 43)])
msa.append(RNA('U'), index=('baz', 44))
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
# Valid cases: `reset_index`
def test_reset_index_empty_msa(self):
msa = TabularMSA([])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(1))
def test_reset_index_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('CCCC')])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_reset_index_non_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('CCCC')], index=['foo', 'bar'])
msa.append(DNA('ACGT'), reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_reset_index_bool_cast(self):
msa = TabularMSA([RNA('AC'), RNA('UU')], index=[42, 43])
msa.append(RNA('..'), reset_index='abc')
self.assertEqual(msa, TabularMSA([RNA('AC'), RNA('UU'), RNA('..')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
# Valid cases (misc)
def test_index_type_change(self):
msa = TabularMSA([DNA('A'), DNA('.')])
msa.append(DNA('C'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
def test_duplicate_index(self):
msa = TabularMSA([DNA('A'), DNA('.')], index=['foo', 'bar'])
msa.append(DNA('C'), index='foo')
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C')],
index=['foo', 'bar', 'foo']))
def test_empty_msa_with_positional_metadata_no_new_positions(self):
msa = TabularMSA([], positional_metadata={'foo': []})
msa.append(DNA(''), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('')], positional_metadata={'foo': []}))
def test_empty_msa_with_positional_metadata_add_new_positions(self):
# bug in 0.4.2
msa = TabularMSA([], positional_metadata={'foo': []})
msa.append(DNA('AA'), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('AA')]))
class TestExtend(unittest.TestCase):
# Error cases
#
# Note: these tests check that the MSA isn't mutated when an error is
# raised. Where applicable, the "invalid" sequence is preceded by valid
# sequence(s) to test one possible (buggy) implementation of `extend`:
# looping over `sequences` and calling `append`. These tests ensure that
# valid sequences aren't appended to the MSA before the error is raised.
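    #
    # A minimal sketch of that buggy pattern (hypothetical, shown only for
    # illustration; `buggy_extend` is not part of the implementation):
    #
    #     def buggy_extend(msa, sequences, **kwargs):
    #         for seq in sequences:
    #             msa.append(seq, **kwargs)  # partial mutation before failure
    #
    # The equality checks after each `assertRaises*` block below are what
    # would catch an implementation like this.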
def test_invalid_minter_index_reset_index_parameter_combos(self):
msa = TabularMSA([])
param_combos = (
{},
{'minter': str, 'index': 'foo', 'reset_index': True},
{'minter': str, 'index': 'foo'},
{'minter': str, 'reset_index': True},
{'index': 'foo', 'reset_index': True}
)
for params in param_combos:
with self.assertRaisesRegex(ValueError,
r"one of.*minter.*index.*reset_index"):
msa.extend([DNA('ACGT')], **params)
self.assertEqual(msa, TabularMSA([]))
def test_from_tabular_msa_index_param_still_required(self):
msa = TabularMSA([DNA('AC'), DNA('TG')])
with self.assertRaisesRegex(ValueError,
r"one of.*minter.*index.*reset_index"):
msa.extend(TabularMSA([DNA('GG'), DNA('CC')]))
self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('TG')]))
def test_invalid_dtype(self):
msa = TabularMSA([])
with self.assertRaisesRegex(TypeError, r'GrammaredSequence.*Sequence'):
msa.extend([Sequence('')], reset_index=True)
self.assertEqual(msa, TabularMSA([]))
def test_dtype_mismatch_rna(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, r'matching type.*RNA.*DNA'):
msa.extend([DNA('----'), RNA('UUUU')], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_dtype_mismatch_float(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(TypeError, r'matching type.*float.*DNA'):
msa.extend([DNA('GGGG'), 42.0], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
with self.assertRaisesRegex(
ValueError, r'must match the number of positions.*5 != 4'):
msa.extend([DNA('TTTT'), DNA('ACGTA')], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_invalid_minter(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(KeyError):
msa.extend([DNA('AAAA', metadata={'id': 'foo'}),
DNA('----')], minter='id')
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
def test_invalid_index(self):
msa = TabularMSA([DNA('ACGT')], index=['foo'])
with self.assertRaises(TypeError):
msa.extend([DNA('----')], index=42)
self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
def test_sequences_index_length_mismatch(self):
msa = TabularMSA([])
with self.assertRaisesRegex(ValueError,
r'sequences.*2.*index length.*3'):
msa.extend([DNA('TTTT'), DNA('ACGT')], index=['a', 'b', 'c'])
self.assertEqual(msa, TabularMSA([]))
# Valid cases: `minter`
def test_minter_empty_msa(self):
msa = TabularMSA([])
msa.extend([RNA('UU'), RNA('--')], minter=str)
self.assertEqual(msa, TabularMSA([RNA('UU'), RNA('--')], minter=str))
def test_minter_metadata_key(self):
msa = TabularMSA([DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'})],
minter='id')
msa.extend([DNA('', metadata={'id': 'c'}),
DNA('', metadata={'id': 'd'})], minter='id')
self.assertEqual(
msa,
TabularMSA([
DNA('', metadata={'id': 'a'}),
DNA('', metadata={'id': 'b'}),
DNA('', metadata={'id': 'c'}),
DNA('', metadata={'id': 'd'})], minter='id'))
def test_minter_callable(self):
msa = TabularMSA([DNA('A', metadata={'id': 'a'}),
DNA('C', metadata={'id': 'b'})],
minter='id')
msa.extend([DNA('G'), DNA('T')], minter=str)
self.assertEqual(
msa,
TabularMSA([
DNA('A', metadata={'id': 'a'}),
DNA('C', metadata={'id': 'b'}),
DNA('G'),
DNA('T')], index=['a', 'b', 'G', 'T']))
def test_multiindex_minter_empty_msa(self):
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa = TabularMSA([])
msa.extend([DNA('AC'), DNA('GG')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_multiindex_minter_non_empty_msa(self):
def multiindex_minter(seq):
if str(seq) == 'C':
return ('baz', 44)
else:
return ('baz', 45)
msa = TabularMSA([DNA('A'), DNA('G')],
index=[('foo', 42), ('bar', 43)])
msa.extend([DNA('C'), DNA('T')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
# Valid cases: `index`
def test_index_empty_msa(self):
msa = TabularMSA([])
msa.extend([RNA('UAC'), RNA('AAU')], index=['foo', 'bar'])
self.assertEqual(msa, TabularMSA([RNA('UAC'), RNA('AAU')],
index=['foo', 'bar']))
def test_index_non_empty_msa(self):
msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
msa.extend([DNA('--'), DNA('..')], index=['foo', 'bar'])
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('GT'), DNA('--'), DNA('..')],
index=['a', 'b', 'foo', 'bar']))
def test_multiindex_index_empty_msa(self):
msa = TabularMSA([])
msa.extend([DNA('AA'), DNA('GG')], index=[('foo', 42), ('bar', 43)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_multiindex_index_non_empty_msa(self):
msa = TabularMSA([DNA('.'), DNA('-')],
index=[('foo', 42), ('bar', 43)])
msa.extend([DNA('A'), DNA('G')], index=[('baz', 44), ('baz', 45)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(
msa.index,
pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
def test_index_object_empty_msa(self):
msa = TabularMSA([])
msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
self.assertEqual(msa, TabularMSA([DNA('AA'), DNA('GG')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_index_object_non_empty_msa(self):
msa = TabularMSA([DNA('CT'), DNA('GG')])
msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
self.assertEqual(
msa,
TabularMSA([DNA('CT'), DNA('GG'), DNA('AA'), DNA('GG')],
index=[0, 1, 0, 1]))
# Valid cases: `reset_index`
def test_reset_index_empty_msa(self):
msa = TabularMSA([])
msa.extend([DNA('ACGT'), DNA('----')], reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('----')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_reset_index_empty_msa_empty_iterable(self):
msa = TabularMSA([])
msa.extend([], reset_index=True)
self.assertEqual(msa, TabularMSA([]))
assert_index_equal(msa.index, pd.RangeIndex(0))
def test_reset_index_non_empty_msa_empty_iterable(self):
msa = TabularMSA([RNA('UU'), RNA('CC')], index=['a', 'b'])
msa.extend([], reset_index=True)
self.assertEqual(msa, TabularMSA([RNA('UU'), RNA('CC')]))
assert_index_equal(msa.index, pd.RangeIndex(2))
def test_reset_index_default_index(self):
msa = TabularMSA([DNA('A'), DNA('G')])
msa.extend([DNA('.'), DNA('-')], reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('A'), DNA('G'), DNA('.'), DNA('-')]))
assert_index_equal(msa.index, pd.RangeIndex(4))
def test_reset_index_non_default_index(self):
msa = TabularMSA([DNA('A'), DNA('G')], index=['a', 'b'])
msa.extend([DNA('.'), DNA('-')], reset_index=True)
self.assertEqual(msa,
TabularMSA([DNA('A'), DNA('G'), DNA('.'), DNA('-')]))
assert_index_equal(msa.index, pd.RangeIndex(4))
def test_reset_index_from_tabular_msa(self):
msa = TabularMSA([DNA('AC'), DNA('TG')], index=[42, 43])
msa.extend(TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')],
index=['a', 'b', 'c']), reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
DNA('AA')]))
assert_index_equal(msa.index, pd.RangeIndex(5))
def test_reset_index_bool_cast(self):
msa = TabularMSA([RNA('AC'), RNA('UU')], index=[42, 43])
msa.extend([RNA('..')], reset_index='abc')
self.assertEqual(msa, TabularMSA([RNA('AC'), RNA('UU'), RNA('..')]))
assert_index_equal(msa.index, pd.RangeIndex(3))
# Valid cases (misc)
def test_index_type_change(self):
msa = TabularMSA([DNA('A'), DNA('.')])
msa.extend([DNA('C')], index=['foo'])
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
def test_duplicate_index(self):
msa = TabularMSA([DNA('A'), DNA('.')], index=['foo', 'bar'])
msa.extend([DNA('C'), DNA('.')], index=['foo', 'baz'])
self.assertEqual(
msa,
TabularMSA([DNA('A'), DNA('.'), DNA('C'), DNA('.')],
index=['foo', 'bar', 'foo', 'baz']))
def test_empty_msa_with_positional_metadata_no_new_positions(self):
msa = TabularMSA([], positional_metadata={'foo': []})
msa.extend([DNA(''), DNA('')], reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA(''), DNA('')], positional_metadata={'foo': []}))
def test_empty_msa_with_positional_metadata_add_new_positions(self):
# bug in 0.4.2
msa = TabularMSA([], positional_metadata={'foo': []})
msa.extend([DNA('AA'), DNA('GG')], reset_index=True)
self.assertEqual(
msa,
TabularMSA([DNA('AA'),
DNA('GG')]))
def test_empty_msa_empty_iterable(self):
msa = TabularMSA([])
msa.extend([], minter=str)
self.assertEqual(msa, TabularMSA([]))
def test_non_empty_msa_empty_iterable(self):
msa = TabularMSA([DNA('AC')], index=['foo'])
msa.extend([], index=[])
self.assertEqual(msa, TabularMSA([DNA('AC')], index=['foo']))
def test_single_sequence(self):
msa = TabularMSA([DNA('AC')])
msa.extend([DNA('-C')], minter=str)
self.assertEqual(msa,
TabularMSA([DNA('AC'), DNA('-C')], index=[0, '-C']))
def test_multiple_sequences(self):
msa = TabularMSA([DNA('AC')])
msa.extend([DNA('-C'), DNA('AG')], minter=str)
self.assertEqual(msa,
TabularMSA([DNA('AC'), DNA('-C'), DNA('AG')],
index=[0, '-C', 'AG']))
def test_from_iterable(self):
msa = TabularMSA([])
msa.extend(iter([DNA('ACGT'), DNA('TGCA')]), reset_index=True)
self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
def test_from_tabular_msa_with_index(self):
msa1 = TabularMSA([DNA('AC'), DNA('TG')])
msa2 = TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')])
msa1.extend(msa2, index=msa2.index)
self.assertEqual(
msa1,
TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
DNA('AA')], index=[0, 1, 0, 1, 2]))
class TestJoin(unittest.TestCase):
def assertEqualJoinedMSA(self, msa1, msa2):
# `TabularMSA.join` doesn't guarantee index order in the joined MSA.
# The order differs across pandas versions, so sort each MSA before
# comparing for equality.
# copy because `TabularMSA.sort` is in-place.
msa1 = copy.copy(msa1)
msa2 = copy.copy(msa2)
msa1.sort()
msa2.sort()
self.assertEqual(msa1, msa2)
def test_invalid_how(self):
with self.assertRaisesRegex(ValueError, r'`how`'):
TabularMSA([]).join(TabularMSA([]), how='really')
def test_invalid_other_type(self):
with self.assertRaisesRegex(TypeError, r'TabularMSA.*DNA'):
TabularMSA([]).join(DNA('ACGT'))
def test_dtype_mismatch(self):
with self.assertRaisesRegex(TypeError, r'dtype.*RNA.*DNA'):
TabularMSA([DNA('AC')]).join(TabularMSA([RNA('UG')]))
with self.assertRaisesRegex(TypeError, r'dtype.*None.*DNA'):
TabularMSA([DNA('AC')]).join(TabularMSA([]))
with self.assertRaisesRegex(TypeError, r'dtype.*DNA.*None'):
TabularMSA([]).join(TabularMSA([DNA('AC')]))
def test_duplicate_index_labels(self):
with self.assertRaisesRegex(ValueError,
r"This MSA's index labels.*unique"):
TabularMSA([DNA('AC'), DNA('--')], index=[0, 0]).join(
TabularMSA([DNA('GT'), DNA('..')]))
with self.assertRaisesRegex(ValueError,
r"`other`'s index labels.*unique"):
TabularMSA([DNA('AC'), DNA('--')]).join(
TabularMSA([DNA('GT'), DNA('..')], index=[0, 0]))
def test_no_metadata(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.')])
msa2 = TabularMSA([DNA('-C'),
DNA('.G')])
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('AC-C'),
DNA('G..G')]))
def test_ignores_metadata(self):
msa1 = TabularMSA([DNA('AC', metadata={'id': 'a'}),
DNA('G.', metadata={'id': 'b'}),
DNA('C-', metadata={'id': 'c'})],
metadata={'id': 'msa1'})
msa2 = TabularMSA([DNA('-C', metadata={'id': 'd'}),
DNA('.G', metadata={'id': 'e'}),
DNA('CA', metadata={'id': 'f'})], index=[2, 1, 0],
metadata={'id': 'msa2'})
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('ACCA'),
DNA('G..G'),
DNA('C--C')]))
def test_outer_join_on_per_sequence_positional_metadata(self):
msa1 = TabularMSA([
DNA('AC', positional_metadata={'1': [1, 2], 'foo': ['a', 'b']}),
DNA('GT', positional_metadata={'2': [3, 4], 'foo': ['c', 'd']})])
msa2 = TabularMSA([
DNA('CA', positional_metadata={'3': [5, 6], 'foo': ['e', 'f']}),
DNA('TG', positional_metadata={'4': [7, 8], 'foo': ['g', 'h']})])
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([
DNA('ACCA',
positional_metadata={'1': [1, 2, np.nan, np.nan],
'3': [np.nan, np.nan, 5, 6],
'foo': ['a', 'b', 'e', 'f']}),
DNA('GTTG',
positional_metadata={'2': [3, 4, np.nan, np.nan],
'4': [np.nan, np.nan, 7, 8],
'foo': ['c', 'd', 'g', 'h']})]))
def test_no_sequences(self):
msa1 = TabularMSA([], positional_metadata={'foo': []})
msa2 = TabularMSA([], positional_metadata={'foo': []})
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(joined, TabularMSA([]))
def test_no_positions(self):
msa1 = TabularMSA([DNA('', positional_metadata={'1': []}),
DNA('', positional_metadata={'2': []})],
positional_metadata={'foo': []})
msa2 = TabularMSA([DNA('', positional_metadata={'3': []}),
DNA('', positional_metadata={'4': []})],
positional_metadata={'foo': []})
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('', positional_metadata={'1': [], '3': []}),
DNA('', positional_metadata={'2': [], '4': []})],
positional_metadata={'foo': []}))
def test_one_with_positions_one_without_positions(self):
msa1 = TabularMSA([DNA('A', positional_metadata={'1': ['a']}),
DNA('C', positional_metadata={'2': ['b']})],
positional_metadata={'foo': ['bar']})
msa2 = TabularMSA([DNA('', positional_metadata={'3': []}),
DNA('', positional_metadata={'4': []})],
positional_metadata={'foo': []})
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('A', positional_metadata={'1': ['a'],
'3': [np.nan]}),
DNA('C', positional_metadata={'2': ['b'],
'4': [np.nan]})],
positional_metadata={'foo': ['bar']}))
def test_how_strict(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-')],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-C'),
DNA('.G'),
DNA('CA')], index=[2, 1, 0],
positional_metadata={'foo': [3, 4],
'bar': ['c', 'd']})
joined = msa1.join(msa2)
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('ACCA'),
DNA('G..G'),
DNA('C--C')],
positional_metadata={'bar': ['a', 'b', 'c', 'd'],
'foo': [1, 2, 3, 4]}))
def test_how_strict_failure_index_mismatch(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-')])
msa2 = TabularMSA([DNA('-C'),
DNA('.G'),
DNA('CA'),
DNA('--')])
with self.assertRaisesRegex(ValueError, r'Index labels must all '
'match'):
msa1.join(msa2)
def test_how_strict_failure_positional_metadata_mismatch(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.')],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-C'),
DNA('.G')],
positional_metadata={'foo': [3, 4]})
with self.assertRaisesRegex(ValueError,
r'Positional metadata columns.*match'):
msa1.join(msa2)
def test_how_inner(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-'),
DNA('--')], index=[0, 1, 2, 3],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-C'),
DNA('.G'),
DNA('CA'),
DNA('..')], index=[2, 1, 0, -1],
positional_metadata={'foo': [3, 4],
'baz': ['c', 'd']})
joined = msa1.join(msa2, how='inner')
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('C--C'),
DNA('G..G'),
DNA('ACCA')], index=[2, 1, 0],
positional_metadata={'foo': [1, 2, 3, 4]}))
def test_how_inner_no_positional_metadata_overlap(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.')], index=['b', 'a'],
positional_metadata={'foo': [1, 2]})
msa2 = TabularMSA([DNA('-C'),
DNA('.G')], index=['a', 'b'],
positional_metadata={'bar': ['c', 'd']})
joined = msa1.join(msa2, how='inner')
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('G.-C'),
DNA('AC.G')], index=['a', 'b']))
def test_how_inner_no_index_overlap_with_positional_metadata_overlap(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.')],
positional_metadata={'foo': [1, 2]})
msa2 = TabularMSA([DNA('-C'),
DNA('.G')], index=['a', 'b'],
positional_metadata={'foo': [3, 4]})
joined = msa1.join(msa2, how='inner')
self.assertEqualJoinedMSA(joined, TabularMSA([]))
def test_how_outer(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-'),
DNA('--')], index=[0, 1, 2, 3],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-CC'),
DNA('.GG'),
DNA('CAA'),
DNA('...')], index=[2, 1, 0, -1],
positional_metadata={'foo': [3, 4, 5],
'baz': ['c', 'd', 'e']})
joined = msa1.join(msa2, how='outer')
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('--...'),
DNA('ACCAA'),
DNA('G..GG'),
DNA('C--CC'),
DNA('-----')], index=range(-1, 4),
positional_metadata={
'bar': ['a', 'b', np.nan, np.nan, np.nan],
'baz': [np.nan, np.nan, 'c', 'd', 'e'],
'foo': [1, 2, 3, 4, 5]}))
def test_how_left(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-'),
DNA('--')], index=[0, 1, 2, 3],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-CC'),
DNA('.GG'),
DNA('CAA'),
DNA('...')], index=[2, 1, 0, -1],
positional_metadata={'foo': [3, 4, 5],
'baz': ['c', 'd', 'e']})
joined = msa1.join(msa2, how='left')
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('ACCAA'),
DNA('G..GG'),
DNA('C--CC'),
DNA('-----')],
positional_metadata={
'foo': [1, 2, 3, 4, 5],
'bar': ['a', 'b', np.nan, np.nan, np.nan]}))
def test_how_right(self):
msa1 = TabularMSA([DNA('AC'),
DNA('G.'),
DNA('C-'),
DNA('--')], index=[0, 1, 2, 3],
positional_metadata={'foo': [1, 2],
'bar': ['a', 'b']})
msa2 = TabularMSA([DNA('-CC'),
DNA('.GG'),
DNA('CAA'),
DNA('...')], index=[2, 1, 0, -1],
positional_metadata={'foo': [3, 4, 5],
'baz': ['c', 'd', 'e']})
joined = msa1.join(msa2, how='right')
self.assertEqualJoinedMSA(
joined,
TabularMSA([DNA('C--CC'),
DNA('G..GG'),
DNA('ACCAA'),
DNA('--...')], index=[2, 1, 0, -1],
positional_metadata={
'foo': [1, 2, 3, 4, 5],
'baz': [np.nan, np.nan, 'c', 'd', 'e']}))
class TestIterPositions(unittest.TestCase):
def test_method_return_type(self):
msa = TabularMSA([DNA('AC'),
DNA('GT')])
obs = msa.iter_positions()
self.assertIsInstance(obs, types.GeneratorType)
def test_position_type(self):
msa = TabularMSA([DNA('AC'),
DNA('GT')])
first_position = next(msa.iter_positions())
# Type should be *exactly* Sequence.
self.assertIs(type(first_position), Sequence)
def test_no_sequences(self):
msa = TabularMSA([])
obs = list(msa.iter_positions())
self.assertEqual(obs, [])
def test_no_sequences_ignore_metadata(self):
msa = TabularMSA([])
obs = list(msa.iter_positions(ignore_metadata=True))
self.assertEqual(obs, [])
def test_no_sequences_reverse(self):
msa = TabularMSA([])
obs = list(msa.iter_positions(reverse=True))
self.assertEqual(obs, [])
def test_no_sequences_reverse_ignore_metadata(self):
msa = TabularMSA([])
obs = list(msa.iter_positions(reverse=True, ignore_metadata=True))
self.assertEqual(obs, [])
def test_no_positions(self):
msa = TabularMSA([DNA(''),
DNA('')])
obs = list(msa.iter_positions())
self.assertEqual(obs, [])
def test_no_positions_ignore_metadata(self):
msa = TabularMSA([DNA(''),
DNA('')])
obs = list(msa.iter_positions(ignore_metadata=True))
self.assertEqual(obs, [])
def test_no_positions_reverse(self):
msa = TabularMSA([DNA(''),
DNA('')])
obs = list(msa.iter_positions(reverse=True))
self.assertEqual(obs, [])
def test_no_positions_reverse_ignore_metadata(self):
msa = TabularMSA([DNA(''),
DNA('')])
obs = list(msa.iter_positions(reverse=True, ignore_metadata=True))
self.assertEqual(obs, [])
def test_single_position(self):
msa = TabularMSA([DNA('A')])
obs = list(msa.iter_positions())
self.assertEqual(obs, [Sequence('A')])
def test_single_position_reverse(self):
msa = TabularMSA([DNA('A'),
DNA('T')])
obs = list(msa.iter_positions(reverse=True))
self.assertEqual(obs, [Sequence('AT')])
def test_multiple_positions(self):
msa = TabularMSA([DNA('ACGT'),
DNA('A-G.'),
DNA('----')])
obs = list(msa.iter_positions())
self.assertEqual(obs,
[Sequence('AA-'), Sequence('C--'), Sequence('GG-'),
Sequence('T.-')])
def test_multiple_positions_reverse(self):
msa = TabularMSA([DNA('AC'),
DNA('A-'),
DNA('--')])
obs = list(msa.iter_positions(reverse=True))
self.assertEqual(obs,
[Sequence('C--'), Sequence('AA-')])
def test_with_positional_metadata(self):
# MSA *and* sequence positional metadata.
msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
seqs = [
DNA('AC', positional_metadata={'foo': [42, 43]}),
DNA('A-'),
DNA('--', positional_metadata={'foo': [-1, -2],
'bar': ['baz', 'bazz']})]
msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
obs = list(msa.iter_positions())
self.assertEqual(
obs,
[Sequence('AA-', metadata={'pm1': 0.5, 'foo': 9},
positional_metadata={'bar': [np.nan, np.nan, 'baz'],
'foo': [42, np.nan, -1]}),
Sequence('C--', metadata={'pm1': 1.5, 'foo': 99},
positional_metadata={'bar': [np.nan, np.nan, 'bazz'],
'foo': [43, np.nan, -2]})])
def test_with_positional_metadata_reverse(self):
# MSA *and* sequence positional metadata.
msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
seqs = [
DNA('AC', positional_metadata={'foo': [42, 43]}),
DNA('A-'),
DNA('--', positional_metadata={'foo': [-1, -2],
'bar': ['baz', 'bazz']})]
msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
obs = list(msa.iter_positions(reverse=True))
self.assertEqual(
obs,
[Sequence('C--', metadata={'pm1': 1.5, 'foo': 99},
positional_metadata={'bar': [np.nan, np.nan, 'bazz'],
'foo': [43, np.nan, -2]}),
Sequence('AA-', metadata={'pm1': 0.5, 'foo': 9},
positional_metadata={'bar': [np.nan, np.nan, 'baz'],
'foo': [42, np.nan, -1]})])
def test_with_positional_metadata_ignore_metadata(self):
# MSA *and* sequence positional metadata.
msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
seqs = [
DNA('AC', positional_metadata={'foo': [42, 43]}),
DNA('A-'),
DNA('--', positional_metadata={'foo': [-1, -2],
'bar': ['baz', 'bazz']})]
msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
obs = list(msa.iter_positions(ignore_metadata=True))
self.assertEqual(obs, [Sequence('AA-'), Sequence('C--')])
class TestConsensus(unittest.TestCase):
def test_no_sequences(self):
msa = TabularMSA([])
cons = msa.consensus()
self.assertEqual(cons, Sequence(''))
def test_no_positions(self):
msa = TabularMSA([DNA(''),
DNA('')])
cons = msa.consensus()
self.assertEqual(cons, DNA(''))
def test_single_sequence(self):
msa = TabularMSA([DNA('ACGT-.')])
cons = msa.consensus()
self.assertEqual(cons, DNA('ACGT--'))
def test_multiple_sequences(self):
msa = TabularMSA([DNA('ACGT'),
DNA('AG-.'),
DNA('AC-.')])
cons = msa.consensus()
self.assertEqual(cons, DNA('AC--'))
def test_ties(self):
msa = TabularMSA([DNA('A-'),
DNA('C-'),
DNA('G-')])
cons = msa.consensus()
self.assertTrue(cons in [DNA('A-'), DNA('C-'), DNA('G-')])
def test_ties_with_gaps(self):
msa = TabularMSA([DNA('-'),
DNA('.'),
DNA('T'),
DNA('T')])
cons = msa.consensus()
self.assertTrue(cons in [DNA('T'), DNA('-')])
def test_default_gap_char(self):
msa = TabularMSA([DNA('.'),
DNA('.'),
DNA('.')])
cons = msa.consensus()
self.assertEqual(cons, DNA('-'))
def test_different_dtype(self):
msa = TabularMSA([RNA('---'),
RNA('AG-'),
RNA('AGG')])
cons = msa.consensus()
self.assertEqual(cons, RNA('AG-'))
def test_with_positional_metadata(self):
# Defining *all* types of metadata to ensure correct metadata is
# propagated to majority consensus sequence.
seqs = [
DNA('-.-', metadata={'id': 'seq1'},
positional_metadata={'qual': range(0, 3)}),
DNA('A.T', metadata={'id': 'seq2'},
positional_metadata={'qual': range(3, 6)}),
DNA('ACT', metadata={'id': 'seq3'},
positional_metadata={'qual': range(6, 9)})
]
msa = TabularMSA(seqs, metadata={'pubmed': 123456},
positional_metadata={'foo': [42, 43, 42],
'bar': ['a', 'b', 'c']})
cons = msa.consensus()
self.assertEqual(
cons,
DNA('A-T', positional_metadata={'foo': [42, 43, 42],
'bar': ['a', 'b', 'c']}))
def test_mixed_gap_characters_as_majority(self):
seqs = [
DNA('A'),
DNA('A'),
DNA('A'),
DNA('A'),
DNA('.'),
DNA('.'),
DNA('.'),
DNA('-'),
DNA('-')
]
msa = TabularMSA(seqs)
cons = msa.consensus()
self.assertEqual(cons, DNA('-'))
class TestConservation(unittest.TestCase):
def test_no_sequences(self):
msa = TabularMSA([])
cons = msa.conservation()
npt.assert_array_equal(cons, np.array([]))
def test_shannon_entropy_dna(self):
msa = TabularMSA([DNA('A'),
DNA('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([DNA('A'),
DNA('G'),
DNA('C'),
DNA('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([DNA('AAC'),
DNA('GAC')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([1.0], base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([DNA('AACT'),
DNA('GACA')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
def test_shannon_entropy_rna(self):
msa = TabularMSA([RNA('A'),
RNA('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([RNA('A'),
RNA('G'),
RNA('C'),
RNA('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([RNA('AAC'),
RNA('GAC')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([1.0], base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([RNA('AACU'),
RNA('GACA')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
def test_shannon_entropy_protein(self):
msa = TabularMSA([Protein('A'),
Protein('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=22)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([Protein('A'),
Protein('G'),
Protein('C'),
Protein('G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
base=22)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([Protein('AAC'),
Protein('GAC')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=22),
1. - scipy.stats.entropy([1.0], base=22),
1. - scipy.stats.entropy([1.0], base=22)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([Protein('AACT'),
Protein('GACA')])
actual = msa.conservation(metric='inverse_shannon_uncertainty')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=22),
1. - scipy.stats.entropy([1.0], base=22),
1. - scipy.stats.entropy([1.0], base=22),
1. - scipy.stats.entropy([0.5, 0.5], base=22)])
npt.assert_array_equal(actual, expected)
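    def test_inverse_shannon_uncertainty_worked_example(self):
        # Illustrative (added) worked example using the same API as the tests
        # above: a column with two equally frequent characters over the
        # 4-letter DNA alphabet has entropy([0.5, 0.5], base=4) == 0.5, so the
        # conservation value is 1 - 0.5 = 0.5.
        msa = TabularMSA([DNA('A'),
                          DNA('G')])
        actual = msa.conservation(metric='inverse_shannon_uncertainty')
        npt.assert_array_almost_equal(actual, np.array([0.5]))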
def test_degenerate_mode_nan(self):
msa = TabularMSA([DNA('NAC'),
DNA('NNC')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
degenerate_mode='nan')
expected = np.array([np.nan,
np.nan,
1. - scipy.stats.entropy([1.0], base=4)])
npt.assert_array_equal(actual, expected)
def test_degenerate_mode_error(self):
msa = TabularMSA([DNA('NACN'),
DNA('NNCA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error')
msa = TabularMSA([DNA('AACA'),
DNA('ANCA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error')
def test_error_on_degenerate_w_nan_on_gap(self):
msa = TabularMSA([DNA('-ACA'),
DNA('-NCA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error',
gap_mode='nan')
def test_column_with_degen_and_gap(self):
msa = TabularMSA([DNA('N'),
DNA('-')])
# test all eight combinations of gap_mode and degenerate_mode
actual = msa.conservation(metric='inverse_shannon_uncertainty',
degenerate_mode='nan',
gap_mode='nan')
npt.assert_array_equal(actual, np.array([np.nan]))
actual = msa.conservation(metric='inverse_shannon_uncertainty',
degenerate_mode='nan',
gap_mode='ignore')
npt.assert_array_equal(actual, np.array([np.nan]))
actual = msa.conservation(metric='inverse_shannon_uncertainty',
degenerate_mode='nan',
gap_mode='include')
npt.assert_array_equal(actual, np.array([np.nan]))
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='nan',
gap_mode='error')
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error',
gap_mode='nan')
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error',
gap_mode='error')
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error',
gap_mode='include')
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
degenerate_mode='error',
gap_mode='ignore')
def test_gap_mode_nan(self):
msa = TabularMSA([DNA('-AC.'),
DNA('--CA')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='nan')
expected = np.array([np.nan,
np.nan,
1. - scipy.stats.entropy([1.0], base=4),
np.nan])
npt.assert_array_equal(actual, expected)
def test_gap_mode_include(self):
msa = TabularMSA([DNA('AC'),
DNA('-G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='include')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=5),
1. - scipy.stats.entropy([0.5, 0.5], base=5)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([DNA('AC'),
DNA('.G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='include')
expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=5),
1. - scipy.stats.entropy([0.5, 0.5], base=5)])
npt.assert_array_equal(actual, expected)
def test_gap_mode_include_gaps_treated_as_single_char(self):
msa = TabularMSA([DNA('.'),
DNA('-')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='include')
expected = np.array([1. - scipy.stats.entropy([1.0], base=5)])
npt.assert_array_equal(actual, expected)
def test_gap_mode_ignore(self):
msa = TabularMSA([DNA('AC'),
DNA('-G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='ignore')
expected = np.array([1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
msa = TabularMSA([DNA('AC'),
DNA('.G')])
actual = msa.conservation(metric='inverse_shannon_uncertainty',
gap_mode='ignore')
expected = np.array([1. - scipy.stats.entropy([1.0], base=4),
1. - scipy.stats.entropy([0.5, 0.5], base=4)])
npt.assert_array_equal(actual, expected)
def test_gap_mode_error(self):
msa = TabularMSA([DNA('-AC-'),
DNA('--CA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
gap_mode="error")
msa = TabularMSA([DNA('AACA'),
DNA('A-CA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
gap_mode="error")
msa = TabularMSA([DNA('AACA'),
DNA('A.CA')])
self.assertRaises(ValueError, msa.conservation,
metric='inverse_shannon_uncertainty',
gap_mode="error")
def test_bad_metric(self):
msa = TabularMSA([DNA('AA'),
DNA('A-')])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(metric='xyz')
msa = TabularMSA([])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(metric='xyz')
def test_bad_gap_mode(self):
msa = TabularMSA([DNA('AA'),
DNA('A-')])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(gap_mode='xyz')
msa = TabularMSA([])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(gap_mode='xyz')
def test_bad_degenerate_mode(self):
msa = TabularMSA([DNA('AA'),
DNA('A-')])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(degenerate_mode='xyz')
msa = TabularMSA([])
with self.assertRaisesRegex(ValueError, r'xyz'):
msa.conservation(degenerate_mode='xyz')
class TestGapFrequencies(unittest.TestCase):
def test_default_behavior(self):
msa = TabularMSA([DNA('AA.'),
DNA('-A-')])
freqs = msa.gap_frequencies()
npt.assert_array_equal(np.array([1, 0, 2]), freqs)
def test_invalid_axis_str(self):
with self.assertRaisesRegex(ValueError, r"axis.*'foo'"):
TabularMSA([]).gap_frequencies(axis='foo')
def test_invalid_axis_int(self):
with self.assertRaisesRegex(ValueError, r"axis.*2"):
TabularMSA([]).gap_frequencies(axis=2)
def test_position_axis_str_and_int_equivalent(self):
msa = TabularMSA([DNA('ACGT'),
DNA('A.G-'),
DNA('----')])
str_freqs = msa.gap_frequencies(axis='position')
int_freqs = msa.gap_frequencies(axis=1)
npt.assert_array_equal(str_freqs, int_freqs)
npt.assert_array_equal(np.array([0, 2, 4]), str_freqs)
def test_sequence_axis_str_and_int_equivalent(self):
msa = TabularMSA([DNA('ACGT'),
DNA('A.G-'),
DNA('----')])
str_freqs = msa.gap_frequencies(axis='sequence')
int_freqs = msa.gap_frequencies(axis=0)
npt.assert_array_equal(str_freqs, int_freqs)
npt.assert_array_equal(np.array([1, 2, 1, 2]), str_freqs)
def test_correct_dtype_absolute_empty(self):
msa = TabularMSA([])
freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([]), freqs)
self.assertEqual(int, freqs.dtype)
def test_correct_dtype_relative_empty(self):
msa = TabularMSA([])
freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([]), freqs)
self.assertEqual(float, freqs.dtype)
def test_correct_dtype_absolute_non_empty(self):
msa = TabularMSA([DNA('AC'),
DNA('-.')])
freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([0, 2]), freqs)
self.assertEqual(int, freqs.dtype)
def test_correct_dtype_relative_non_empty(self):
msa = TabularMSA([DNA('AC'),
DNA('-.')])
freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([0.0, 1.0]), freqs)
self.assertEqual(float, freqs.dtype)
def test_no_sequences_absolute(self):
msa = TabularMSA([])
seq_freqs = msa.gap_frequencies(axis='sequence')
pos_freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([]), seq_freqs)
npt.assert_array_equal(np.array([]), pos_freqs)
def test_no_sequences_relative(self):
msa = TabularMSA([])
seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
pos_freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([]), seq_freqs)
npt.assert_array_equal(np.array([]), pos_freqs)
def test_no_positions_absolute(self):
msa = TabularMSA([DNA('')])
seq_freqs = msa.gap_frequencies(axis='sequence')
pos_freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([]), seq_freqs)
npt.assert_array_equal(np.array([0]), pos_freqs)
def test_no_positions_relative(self):
msa = TabularMSA([DNA('')])
seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
pos_freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([]), seq_freqs)
npt.assert_array_equal(np.array([np.nan]), pos_freqs)
def test_single_sequence_absolute(self):
msa = TabularMSA([DNA('.T')])
seq_freqs = msa.gap_frequencies(axis='sequence')
pos_freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([1, 0]), seq_freqs)
npt.assert_array_equal(np.array([1]), pos_freqs)
def test_single_sequence_relative(self):
msa = TabularMSA([DNA('.T')])
seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
pos_freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([1.0, 0.0]), seq_freqs)
npt.assert_array_equal(np.array([0.5]), pos_freqs)
def test_single_position_absolute(self):
msa = TabularMSA([DNA('.'),
DNA('T')])
seq_freqs = msa.gap_frequencies(axis='sequence')
pos_freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([1]), seq_freqs)
npt.assert_array_equal(np.array([1, 0]), pos_freqs)
def test_single_position_relative(self):
msa = TabularMSA([DNA('.'),
DNA('T')])
seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
pos_freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([0.5]), seq_freqs)
npt.assert_array_equal(np.array([1.0, 0.0]), pos_freqs)
def test_position_axis_absolute(self):
msa = TabularMSA([
DNA('ACGT'), # no gaps
DNA('A.G-'), # some gaps (mixed gap chars)
DNA('----'), # all gaps
DNA('....')]) # all gaps
freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([0, 2, 4, 4]), freqs)
def test_position_axis_relative(self):
msa = TabularMSA([DNA('ACGT'),
DNA('A.G-'),
DNA('CCC.'),
DNA('----'),
DNA('....')])
freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([0.0, 0.5, 0.25, 1.0, 1.0]), freqs)
def test_sequence_axis_absolute(self):
msa = TabularMSA([DNA('AC-.'),
DNA('A.-.'),
DNA('G--.')])
freqs = msa.gap_frequencies(axis='sequence')
npt.assert_array_equal(np.array([0, 2, 3, 3]), freqs)
def test_sequence_axis_relative(self):
msa = TabularMSA([DNA('AC--.'),
DNA('A.A-.'),
DNA('G-A-.')])
freqs = msa.gap_frequencies(axis='sequence', relative=True)
npt.assert_array_equal(np.array([0.0, 2/3, 1/3, 1.0, 1.0]), freqs)
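    def test_relative_is_absolute_divided_by_position_count(self):
        # Illustrative (added) check of the arithmetic used above: along the
        # position axis, the relative frequency is the per-sequence gap count
        # divided by the number of positions (4 here).
        msa = TabularMSA([DNA('AC-.'),
                          DNA('A...')])
        absolute = msa.gap_frequencies(axis='position')
        relative = msa.gap_frequencies(axis='position', relative=True)
        npt.assert_array_equal(np.array([2, 3]), absolute)
        npt.assert_array_equal(absolute / 4, relative)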
def test_relative_frequencies_precise(self):
class CustomSequence(GrammaredSequence):
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('0123456789')
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '0'
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set('')
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {}
msa = TabularMSA([CustomSequence('0123456789')])
freqs = msa.gap_frequencies(axis='position', relative=True)
npt.assert_array_equal(np.array([1.0]), freqs)
def test_custom_gap_characters(self):
class CustomSequence(GrammaredSequence):
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('#$*')
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '#'
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set('ABC-.')
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {'D': 'ABC-.'}
msa = TabularMSA([CustomSequence('ABCD'),
CustomSequence('-.-.'),
CustomSequence('A#C*'),
CustomSequence('####'),
CustomSequence('$$$$')])
freqs = msa.gap_frequencies(axis='position')
npt.assert_array_equal(np.array([0, 0, 2, 4, 4]), freqs)
class TestGetPosition(unittest.TestCase):
def test_without_positional_metadata(self):
msa = TabularMSA([DNA('ACG'),
DNA('A-G')])
position = msa._get_position_(1)
self.assertEqual(position, Sequence('C-'))
def test_with_positional_metadata(self):
msa = TabularMSA([DNA('ACG'),
DNA('A-G')],
positional_metadata={'foo': [42, 43, 44],
'bar': ['abc', 'def', 'ghi']})
position = msa._get_position_(1)
self.assertEqual(position,
Sequence('C-', metadata={'foo': 43, 'bar': 'def'}))
class TestIsSequenceAxis(unittest.TestCase):
def setUp(self):
self.msa = TabularMSA([])
def test_invalid_str(self):
with self.assertRaisesRegex(ValueError, r"axis.*'foo'"):
self.msa._is_sequence_axis('foo')
def test_invalid_int(self):
with self.assertRaisesRegex(ValueError, r"axis.*2"):
self.msa._is_sequence_axis(2)
def test_positive_str(self):
self.assertTrue(self.msa._is_sequence_axis('sequence'))
def test_positive_int(self):
self.assertTrue(self.msa._is_sequence_axis(0))
def test_negative_str(self):
self.assertFalse(self.msa._is_sequence_axis('position'))
def test_negative_int(self):
self.assertFalse(self.msa._is_sequence_axis(1))
class TestHashable(unittest.TestCase):
def test_unhashable_type(self):
self.assertNotIsInstance(TabularMSA([]), collections.Hashable)
def test_unhashable_object(self):
with self.assertRaisesRegex(TypeError, r'unhashable'):
hash(TabularMSA([]))
class TestRepr(unittest.TestCase):
def test_repr(self):
# basic sanity checks -- more extensive testing of formatting and
# special cases is performed in TabularMSAReprDoctests below. here we
# only test that pieces of the repr are present. these tests also
# exercise coverage in case doctests stop counting towards coverage in
# the future
# str calls repr
self.assertEqual(repr(TabularMSA([])), str(TabularMSA([])))
self.assertEqual(repr(TabularMSA([DNA('')])),
str(TabularMSA([DNA('')])))
self.assertEqual(repr(TabularMSA([DNA('ACGT')])),
str(TabularMSA([DNA('ACGT')])))
self.assertEqual(repr(TabularMSA([DNA('ACGT'*25) for x in range(10)])),
str(TabularMSA([DNA('ACGT'*25) for x in range(10)])))
# empty
obs = repr(TabularMSA([]))
self.assertEqual(obs.count('\n'), 5)
self.assertTrue(obs.startswith('TabularMSA'))
self.assertIn('sequence count: 0', obs)
self.assertIn('position count: 0', obs)
# minimal
obs = repr(TabularMSA([DNA('')]))
self.assertEqual(obs.count('\n'), 5)
self.assertTrue(obs.startswith('TabularMSA'))
self.assertIn('sequence count: 1', obs)
self.assertIn('position count: 0', obs)
self.assertIn('[DNA]', obs)
# no metadata
obs = repr(TabularMSA([DNA('ACGT')]))
self.assertEqual(obs.count('\n'), 6)
self.assertTrue(obs.startswith('TabularMSA'))
self.assertIn('sequence count: 1', obs)
self.assertIn('position count: 4', obs)
self.assertIn('[DNA]', obs)
self.assertTrue(obs.endswith('ACGT'))
# sequence spanning > 5 lines
obs = repr(TabularMSA([DNA('A' * 71) for x in range(6)]))
self.assertEqual(obs.count('\n'), 10)
self.assertTrue(obs.startswith('TabularMSA'))
self.assertIn('sequence count: 6', obs)
self.assertIn('position count: 71', obs)
self.assertIn('\n...\n', obs)
self.assertIn('[DNA]', obs)
self.assertTrue(obs.endswith('AAAA'))
# sequences overflowing
obs = repr(TabularMSA([DNA('A' * 72)]))
self.assertEqual(obs.count('\n'), 6)
self.assertTrue(obs.startswith('TabularMSA'))
self.assertIn('sequence count: 1', obs)
self.assertIn('position count: 72', obs)
self.assertIn('[DNA]', obs)
self.assertTrue(obs.endswith(' ... ' + 'A'*33))
# NOTE: this must be a *separate* class for doctests only (no unit tests). nose
# will not run the unit tests otherwise
#
# these doctests exercise the correct formatting of TabularMSA's repr in a
# variety of situations. they are more extensive than the unit tests above
# (TestRepr.test_repr) but cannot be relied upon for coverage (the unit tests
# take care of this)
class TabularMSAReprDoctests:
r"""
>>> from skbio import DNA, TabularMSA
Empty (minimal) MSA:
>>> TabularMSA([])
TabularMSA
---------------------
Stats:
sequence count: 0
position count: 0
---------------------
MSA with single empty sequence:
>>> TabularMSA([DNA('')])
TabularMSA[DNA]
---------------------
Stats:
sequence count: 1
position count: 0
---------------------
MSA with single sequence with single character:
>>> TabularMSA([DNA('G')])
TabularMSA[DNA]
---------------------
Stats:
sequence count: 1
position count: 1
---------------------
G
MSA with multicharacter sequence:
>>> TabularMSA([DNA('ACGT')])
TabularMSA[DNA]
---------------------
Stats:
sequence count: 1
position count: 4
---------------------
ACGT
Full single line:
>>> TabularMSA([DNA('A' * 71)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 1
position count: 71
-----------------------------------------------------------------------
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Full single line with 1 character overflow:
>>> TabularMSA([DNA('A' * 72)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 1
position count: 72
-----------------------------------------------------------------------
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ... AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Two sequences with full lines:
>>> TabularMSA([DNA('T' * 71), DNA('T' * 71)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 2
position count: 71
-----------------------------------------------------------------------
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
Two sequences with full lines with 1 character overflow:
>>> TabularMSA([DNA('T' * 72), DNA('T' * 72)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 2
position count: 72
-----------------------------------------------------------------------
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT ... TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT ... TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
Five full lines (maximum amount of information):
>>> TabularMSA([DNA('A' * 71) for x in range(5)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 5
position count: 71
-----------------------------------------------------------------------
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Six lines start "summarized" output:
>>> TabularMSA([DNA('A' * 71) for x in range(6)])
TabularMSA[DNA]
-----------------------------------------------------------------------
Stats:
sequence count: 6
position count: 71
-----------------------------------------------------------------------
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
...
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Supply horrendous metadata and positional metadata to exercise a variety of
metadata formatting cases and rules. Sorting should be by type, then by
value within each type (Python 3 doesn't allow sorting of mixed types):
>>> metadata = {
... # str key, str value
... 'abc': 'some description',
... # int value
... 'foo': 42,
... # unsupported type (dict) value
... 'bar': {},
... # int key, wrapped str (single line)
... 42: 'some words to test text wrapping and such... yada yada yada '
... 'yada yada yada yada yada.',
... # bool key, wrapped str (multi-line)
... True: 'abc ' * 34,
... # float key, truncated str (too long)
... 42.5: 'abc ' * 200,
... # unsupported type (tuple) key, unsupported type (list) value
... ('foo', 'bar'): [1, 2, 3],
... # bytes key, single long word that wraps
... b'long word': 'abc' * 30,
... # truncated key (too long), None value
... 'too long of a key name to display in repr': None,
... # wrapped bytes value (has b'' prefix)
... 'bytes wrapped value': b'abcd' * 25,
... # float value
... 0.1: 99.9999,
... # bool value
... 43: False,
... # None key, complex value
... None: complex(-1.0, 0.0),
... # nested quotes
... 10: '"\''
... }
>>> positional_metadata = pd.DataFrame({
... # str key, int list value
... 'foo': [1, 2, 3, 4],
... # float key, float list value
... 42.5: [2.5, 3.0, 4.2, -0.00001],
... # int key, object list value
... 42: [[], 4, 5, {}],
... # truncated key (too long), bool list value
... 'abc' * 90: [True, False, False, True],
... # None key
... None: range(4)})
>>> positional_metadata = positional_metadata.reindex(
... columns=['foo', 42.5, 42, 'abc' * 90, None])
>>> TabularMSA([DNA('ACGT')], metadata=metadata,
... positional_metadata=positional_metadata)
TabularMSA[DNA]
-----------------------------------------------------------------------
Metadata:
None: (-1+0j)
True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc '
b'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
bcabcabcabcabcabcabcabcabcabcabcabcabc'
0.1: 99.9999
42.5: <class 'str'>
10: '"\''
42: 'some words to test text wrapping and such... yada yada yada
yada yada yada yada yada.'
43: False
'abc': 'some description'
'bar': <class 'dict'>
'bytes wrapped value': b'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcd'
'foo': 42
<class 'str'>: None
<class 'tuple'>: <class 'list'>
Positional metadata:
'foo': <dtype: int64>
42.5: <dtype: float64>
42: <dtype: object>
<class 'str'>: <dtype: bool>
None: <dtype: int64>
Stats:
sequence count: 1
position count: 4
-----------------------------------------------------------------------
ACGT
"""
pass
if __name__ == "__main__":
unittest.main()
|
gregcaporaso/scikit-bio
|
skbio/alignment/tests/test_tabular_msa.py
|
Python
|
bsd-3-clause
| 148,436
|
[
"scikit-bio"
] |
da7d9936f0f7cb6ee705feac4ecc465a0a28d83a71780da4188fcf6ae05132eb
|
import numpy as np
from scipy import linalg
from scipy import spatial
# Covariance functions
def approx_quantile(coverage_prob, d, n, exp=1):
'''
Compute approximate coverage_prob quantile of maximal distance between n
spherically-distributed points with identity covariance and the origin.
Arguments
---------
coverage_prob : number
Probability that maximum distance is less than returned value.
d : integer
Number of dimensions.
n : integer
      Number of points over which the maximum distance is taken.
exp : number
Tail exponent for distance distribution (upper bound)
Returns
-------
q : float
Approximate coverage_prob quantile of maximum distance distribution.
'''
if exp < 1:
raise ValueError('Tail exponent too small.')
elif 1 <= exp and exp < 2:
return np.sqrt(
d * (-np.log(-np.log(coverage_prob)) + np.log(n)))
elif exp >= 2:
return np.sqrt(
np.sqrt(2. * d) * (
-np.log(-np.log(coverage_prob)) / np.sqrt(2 * np.log(n)) +
np.sqrt(2 * np.log(n)) -
0.5 * np.log(np.log(n) * 4 * np.pi) / np.sqrt(2 * np.log(n)) +
d))
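# Illustrative usage sketch (not part of the original module): a quick call for
# a 95% coverage target in 3 dimensions over 1000 points, under both tail
# regimes. The numeric values depend entirely on the approximation above.
def _example_approx_quantile():
    """Illustrative only."""
    q_heavy = approx_quantile(0.95, d=3, n=1000, exp=1)  # 1 <= exp < 2 branch
    q_light = approx_quantile(0.95, d=3, n=1000, exp=2)  # exp >= 2 branch
    return q_heavy, q_light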
def cov_sqexp(r, scale=1.):
'''
Squared exponential (Gaussian) covariance function with given scale.
'''
return np.exp(- (r / scale) ** 2)
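# Illustrative sketch: the squared exponential kernel equals 1 at r = 0 and
# decays smoothly with distance, with `scale` setting the decay length.
def _example_cov_sqexp():
    """Illustrative only."""
    r = np.array([0.0, 0.5, 1.0, 2.0])
    # exp(-(r / 1.0) ** 2) -> approximately [1.0, 0.78, 0.37, 0.018]
    return cov_sqexp(r, scale=1.0)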
def build_grid(d, grid_radius=1., grid_transform=None, grid_min_spacing=0.5,
grid_shape='spherical'):
'''
Build regular cubic or spherical grid in d dimensions.
Arguments
---------
d : integer
Number of dimensions for grid.
grid_radius : number
Minimum radius of grid before transform, inclusive.
grid_transform : np.ndarray or matrix
Optional d x d nd.array or matrix providing transformation from cubic or
spherical grid into space of interest. Should be lower-triangular and
positive-definite.
grid_min_spacing : float
Minimum spacing of grid after transformation.
grid_shape : string
Shape of grid, 'cubic' or 'spherical'. Spherical is truncated cubic grid.
Returns
-------
grid : ndarray
      n_grid x d ndarray containing the computed grid. Each row is a single
      vector in R^d.
'''
# Find eigenvalues of transformation
if grid_transform is None:
transform_eigenvalues = np.ones(d)
else:
transform_eigenvalues = np.diag(grid_transform)
# Build grid before rotation and scaling, adjusting spacing as needed
grid_radius = float(grid_radius)
h_grid = [grid_radius / np.ceil(grid_radius / grid_min_spacing * v) for v in
transform_eigenvalues]
dim_grid = [int(2 * grid_radius / h + 1) for h in h_grid]
dim_grid_float = np.array(dim_grid, dtype=float)
grid = np.mgrid[tuple(slice(0, l) for l in dim_grid)]
grid = np.array([z.flatten() for z in grid], dtype=float).T
grid /= (dim_grid_float - 1.) / 2.
grid -= 1.
# Truncate to sphere if requested
if grid_shape[:5] == 'spher':
grid = grid[np.sum(grid ** 2, 1) <= 1]
# Rescale for radius
grid *= grid_radius
# Transform and recenter
if grid_transform is not None:
grid = np.dot(grid, grid_transform.T)
return grid
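# Illustrative sketch: build a small 2-D spherical grid of radius 1 with
# minimum spacing 0.5. Each row of the returned array is one grid point, and
# every point lies inside the unit disc before any transform is applied.
def _example_build_grid():
    """Illustrative only."""
    grid = build_grid(d=2, grid_radius=1., grid_min_spacing=0.5,
                      grid_shape='spherical')
    assert grid.shape[1] == 2
    assert np.all(np.sum(grid ** 2, axis=1) <= 1. + 1e-12)
    return grid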
def build_emulator(f, center, slope_mean=None, cov=cov_sqexp, grid_radius=1.,
grid_transform=None, grid_min_spacing=0.5,
grid_shape='spherical', f_args=(), f_kwargs={}, cov_args=(),
cov_kwargs={}, min_cov=1e-9, cov_step=10.,
max_log10_condition=10.):
'''
Build Gaussian processor emulator for given function.
Parameters
----------
f : function
Function to emulate. Must take a d x n matrix as its first argument and
      return a k x n ndarray containing the function value for each
d-dimensional column of the input matrix. Called as
f(x, *f_args, **f_kwargs) in evaluations.
center : d length ndarray
Center of sampling region for emulator.
slope_mean : d x d ndarray
Optional linear approximation for f(x - center). Must be lower-triangular.
cov : function
Covariance function for Gaussian process. Must accept ndarray of distances
as first argument and return an ndarray of the same dimension. Called as
cov(dm, *cov_args, **cov_kwargs).
grid_radius : number
Minimum radius of grid before transform, inclusive.
grid_transform : np.ndarray or matrix
Optional d x d nd.array or matrix providing transformation from cubic or
spherical grid into space of interest. Should be lower-triangular and
positive-definite.
grid_min_spacing : float
Minimum spacing of grid after transformation.
grid_shape : string
Shape of grid, 'cubic' or 'spherical'. Spherical is truncated cubic grid.
f_args : tuple
Tuple of additional positional arguments for f.
f_kwargs : dict
Dictionary of additional kw arguments for f.
cov_args : tuple
Tuple of additional positional arguments for cov.
    cov_kwargs : dict
Dictionary of additional kw arguments for cov.
min_cov : float
Initial minimum covariance; covariance matrix is truncated at this value.
cov_step : float
Multiplicative step for minimum covariance (upward) if covariance matrix
is computationally singular.
max_log10_condition : number
Maximum log10 condition number to accept for covariance matrix.
Truncation continues at min_cov * cov_step**k until this is satisfied.
Returns
-------
A dictionary containing:
      - grid : n_grid x d ndarray
The computed grid for approximation.
- v : n_grid x k ndarray
Array for approximation.
- center : d length ndarray
Center of emulation region.
- slope_mean : d x d ndarray
Optional slope of linear mean function. Can be None.
'''
# Get dimensions
d = np.size(center)
# Build grid
grid = build_grid(
d=d, grid_radius=grid_radius, grid_transform=grid_transform,
grid_min_spacing=grid_min_spacing, grid_shape=grid_shape)
grid += center
# Evaluate function over grid
f_values = f(grid.T, *f_args, **f_kwargs)
if slope_mean is not None:
f_values -= np.dot(slope_mean, (grid - center).T)
# Compute covariance matrix for GP
C = spatial.distance_matrix(grid, grid, p=2)
C = cov(C, *cov_args, **cov_kwargs)
# Truncate at minimum covariance
C[C < min_cov] = 0.
# Continue to truncate at higher covariances if needed for numerical
# stability
svals = linalg.svdvals(C)
log10_condition = np.ptp(np.log10(svals))
while log10_condition > max_log10_condition:
min_cov *= cov_step
C[C < min_cov] = 0.
svals = linalg.svdvals(C)
log10_condition = np.ptp(np.log10(svals))
# Compute vector for subsequent approximations
v = linalg.solve(C, f_values.T)
# Build output
emulator = {'grid': grid, 'v': v,
'center': center, 'slope_mean': slope_mean}
return emulator
def evaluate_emulator(x, emulator, cov, cov_args=(), cov_kwargs={}):
'''
Evaluates emulator at given point or sequence of points
Arguments
---------
x : ndarray
Array of length d or of dimension d x m, with each column containing a point
at which to evaluate the emulator.
emulator : dict
Dictionary as output by build_emulator containing grid and v.
cov : function
Covariance function for Gaussian process. Must accept ndarray of distances
as first argument and return an ndarray of the same dimension. Called as
cov(dm, *cov_args, **cov_kwargs).
cov_args : tuple
Tuple of additional positional arguments for cov.
    cov_kwargs : dict
Dictionary of additional kw arguments for cov.
Returns
-------
f_hat : ndarray
Array of size k x m containing estimated values of function.
'''
# Convert x to matrix if needed
    if not isinstance(x, np.ndarray):
x = np.array(x)
if len(x.shape) < 2:
x = x[:, np.newaxis]
# Evaluate distances between x and grid
C = spatial.distance_matrix(x.T, emulator['grid'])
C = cov(C, *cov_args, **cov_kwargs)
# Estimate function values at x
f_hat = np.dot(emulator['v'].T, C.T)
# Add linear term if needed
if emulator['slope_mean'] is not None:
f_hat += np.dot(emulator['slope_mean'], (x.T - emulator['center']).T)
if x.shape[1] < 2:
f_hat = f_hat[:, 0]
return f_hat
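# End-to-end sketch (illustrative; the target function, kernel scale and query
# point are made up): build an emulator for a smooth scalar function around a
# chosen center, then query it nearby. The emulated function must map a d x n
# array to a k x n array, as required by build_emulator.
def _example_emulator_roundtrip():
    """Illustrative only."""
    def f(x):
        # x is d x n; return the 1 x n array of squared norms
        return np.sum(x ** 2, axis=0, keepdims=True)
    center = np.zeros(2)
    emu = build_emulator(f, center, cov=cov_sqexp, grid_radius=1.,
                         grid_min_spacing=0.5, cov_kwargs={'scale': 0.5})
    x_query = np.array([[0.1], [0.2]])  # a single 2-d point as a column
    return evaluate_emulator(x_query, emu, cov=cov_sqexp,
                             cov_kwargs={'scale': 0.5})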
def evaluate_emulator_nogrid(
x, v, center, cov, slope_mean=None, grid_radius=1.,
grid_transform=None, grid_min_spacing=0.5,
grid_shape='spherical', cov_args=(),
cov_kwargs={}):
'''
Evaluates emulator at given point or sequence of points, reconstructing the
grid from other arguments. This is useful in communication-limited settings
where the grid parameters are common knowledge.
Arguments
---------
x : ndarray
Array of length d or of dimension d x m, with each column containing a point
at which to evaluate the emulator.
    v : n_grid x k ndarray
      Array for approximation, as produced by build_emulator.
center : d length ndarray
Center of emulation region.
cov : function
Covariance function for Gaussian process. Must accept ndarray of distances
as first argument and return an ndarray of the same dimension. Called as
cov(dm, *cov_args, **cov_kwargs).
slope_mean : d x d ndarray
Optional slope of linear mean function. Can be None.
grid_radius : number
Minimum radius of grid before transform, inclusive.
grid_transform : np.ndarray or matrix
Optional d x d nd.array or matrix providing transformation from cubic or
spherical grid into space of interest. Should be lower-triangular and
positive-definite.
grid_min_spacing : float
Minimum spacing of grid after transformation.
grid_shape : string
Shape of grid, 'cubic' or 'spherical'. Spherical is truncated cubic
grid.
cov_args : tuple
Tuple of additional positional arguments for cov.
    cov_kwargs : dict
Dictionary of additional kw arguments for cov.
Returns
-------
f_hat : ndarray
Array of length m containing estimated values of function.
'''
# Convert x to matrix if needed
    if not isinstance(x, np.ndarray):
x = np.array(x)
if len(x.shape) < 2:
x = x[:, np.newaxis]
    # Build grid (recover the dimensionality from the center vector; `d` is
    # not passed explicitly to this function)
    d = np.size(center)
    grid = build_grid(
        d=d, grid_radius=grid_radius, grid_transform=grid_transform,
grid_min_spacing=grid_min_spacing, grid_shape=grid_shape)
grid += center
# Evaluate distances between x and grid
C = spatial.distance_matrix(x.T, grid)
C = cov(C, *cov_args, **cov_kwargs)
# Estimate function values at x
f_hat = np.dot(C, v).T
# Add linear term if needed
if slope_mean is not None:
        f_hat += np.dot(slope_mean, (x.T - center).T)
if x.shape[1] < 2:
f_hat = f_hat[:, 0]
return f_hat
def aggregate_emulators(emulators, **kwargs):
'''
Aggregate list or tuple of emulators into a single emulator for their sum.
Arguments
---------
emulators : list-like
List-like collection of emulators
**kwargs
Additional aggregations to compute. These should be functions taking a
single emulator as an argument. Each is applied to each entry of the
emulator and summed.
Returns
-------
emulator : dict
A dictionary for the combined emulator containing
      - grid : n_grid x d ndarray
The computed grid for approximation.
- v : n_grid x k ndarray
Array for approximation.
- center : d length ndarray
Center of emulation region.
- slope_mean : d x d ndarray
Optional slope of linear mean function. Can be None.
'''
# Get dimensions
d = np.size(emulators[0]['center'])
k = np.shape(emulators[0]['v'])[1]
n_grids = np.array([emulator['grid'].shape[0] for emulator in emulators],
dtype=int)
n_grid_agg = np.sum(n_grids)
# Allocate arrays for combined emulator
v_agg = np.empty((n_grid_agg, k))
grid_agg = np.empty((n_grid_agg, d))
center_agg = np.zeros(d)
slope_mean_agg = np.zeros((d, d))
# Allocate arrays for additional aggregations
if len(kwargs) > 0:
aggs = dict(zip(kwargs.keys(), np.zeros(len(kwargs))))
# Iterate over emulators
start = 0
for i, emulator in enumerate(emulators):
v_agg[start:start + n_grids[i], :] = emulator['v']
grid_agg[start:start + n_grids[i], :] = emulator['grid']
start += n_grids[i]
if emulator['slope_mean'] is not None:
slope_mean_agg += emulator['slope_mean']
center_agg += np.dot(emulator['slope_mean'], emulator['center'])
if len(kwargs) > 0:
            for key in aggs:
                aggs[key] += kwargs[key](emulator)
if np.max(np.abs(slope_mean_agg)) > 0:
center_agg = linalg.solve_triangular(
slope_mean_agg, center_agg, lower=True)
emulator = {'grid': grid_agg, 'v': v_agg,
'center': center_agg, 'slope_mean': slope_mean_agg}
if len(kwargs) > 0:
emulator.update(aggs)
return emulator
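# Illustrative sketch: two emulators built around the same center can be
# aggregated into one emulator for the sum of their target functions, and the
# combined emulator is evaluated exactly like a single one.
def _example_aggregate_emulators():
    """Illustrative only."""
    center = np.zeros(2)
    def f1(x):
        return np.sum(x ** 2, axis=0, keepdims=True)
    def f2(x):
        return np.sum(x, axis=0, keepdims=True)
    emus = [build_emulator(f, center, cov=cov_sqexp, cov_kwargs={'scale': 0.5})
            for f in (f1, f2)]
    combined = aggregate_emulators(emus)
    x_query = np.array([[0.1], [0.2]])
    return evaluate_emulator(x_query, combined, cov=cov_sqexp,
                             cov_kwargs={'scale': 0.5})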
def aggregate_emulators_mpi(comm, emulator=None, MPIROOT=0, **kwargs):
'''
Aggregate emulators from workers into a single emulator for their sum via
MPI gather operation (serialized).
Arguments
---------
comm : mpi4py communicator
MPI communicator.
emulator : dict
Dictionary as output by build_emulator containing grid and v.
MPIROOT : int
Rank of root MPI process.
**kwargs
Additional aggregations to compute. These should be functions taking a
single emulator as an argument. Each is applied to each entry of the
emulator and summed.
Returns
-------
On the workers, None.
On the master,
emulator : dict
A dictionary for the combined emulator containing
      - grid : n_grid x d ndarray
The computed grid for approximation.
- v : n_grid x k ndarray
Array for approximation.
- center : d length ndarray
Center of emulation region.
- slope_mean : d x d ndarray
Optional slope of linear mean function. Can be None.
'''
# Get number of workers and MPI rank
mpi_rank = comm.Get_rank()
if mpi_rank == MPIROOT:
# Master node process
# Gather entire emulators from individual nodes
emulator_list = comm.gather(None, root=MPIROOT)[1:]
return aggregate_emulators(emulator_list, **kwargs)
else:
# Worker node process
# Send emulator to aggregator
comm.gather(emulator, root=MPIROOT)
|
awblocker/quantitation
|
lib/quantitation/emulate.py
|
Python
|
bsd-3-clause
| 14,952
|
[
"Gaussian"
] |
9831a236225e4d900a09c7f1e0f4e649848fecf2b50b0b165c91f9a44b0043a6
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from functools import partial
from PyQt5.Qt import (
QMainWindow, Qt, QApplication, pyqtSignal, QLabel, QIcon, QFormLayout,
QDialog, QSpinBox, QCheckBox, QDialogButtonBox, QToolButton, QMenu, QInputDialog)
from calibre.gui2 import error_dialog
from calibre.gui2.tweak_book import actions, tprefs, editors
from calibre.gui2.tweak_book.editor.canvas import Canvas
class ResizeDialog(QDialog): # {{{
def __init__(self, width, height, parent=None):
QDialog.__init__(self, parent)
self.l = l = QFormLayout(self)
self.setLayout(l)
self.aspect_ratio = width / float(height)
l.addRow(QLabel(_('Choose the new width and height')))
self._width = w = QSpinBox(self)
w.setMinimum(1)
w.setMaximum(10 * width)
w.setValue(width)
w.setSuffix(' px')
l.addRow(_('&Width:'), w)
self._height = h = QSpinBox(self)
h.setMinimum(1)
h.setMaximum(10 * height)
h.setValue(height)
h.setSuffix(' px')
l.addRow(_('&Height:'), h)
w.valueChanged.connect(partial(self.keep_ar, 'width'))
h.valueChanged.connect(partial(self.keep_ar, 'height'))
self.ar = ar = QCheckBox(_('Keep &aspect ratio'))
ar.setChecked(True)
l.addRow(ar)
self.resize(self.sizeHint())
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addRow(bb)
def keep_ar(self, which):
if self.ar.isChecked():
val = getattr(self, which)
oval = val / self.aspect_ratio if which == 'width' else val * self.aspect_ratio
other = getattr(self, '_height' if which == 'width' else '_width')
other.blockSignals(True)
other.setValue(oval)
other.blockSignals(False)
@dynamic_property
def width(self):
def fget(self):
return self._width.value()
def fset(self, val):
self._width.setValue(val)
return property(fget=fget, fset=fset)
@dynamic_property
def height(self):
def fget(self):
return self._height.value()
def fset(self, val):
self._height.setValue(val)
return property(fget=fget, fset=fset)
# }}}
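# Worked example (illustrative): keep_ar above preserves width / height equal
# to the original aspect_ratio. Starting from an 800 x 600 image (ratio 4/3),
# entering a new width of 400 drives the height box to 400 / (4/3) = 300, and
# entering a new height of 150 drives the width box to 150 * (4/3) = 200.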
class Editor(QMainWindow):
has_line_numbers = False
modification_state_changed = pyqtSignal(object)
undo_redo_state_changed = pyqtSignal(object, object)
data_changed = pyqtSignal(object)
cursor_position_changed = pyqtSignal() # dummy
copy_available_state_changed = pyqtSignal(object)
def __init__(self, syntax, parent=None):
QMainWindow.__init__(self, parent)
if parent is None:
self.setWindowFlags(Qt.Widget)
self.is_synced_to_container = False
self.syntax = syntax
self._is_modified = False
self.copy_available = self.cut_available = False
self.quality = 90
self.canvas = Canvas(self)
self.setCentralWidget(self.canvas)
self.create_toolbars()
self.canvas.image_changed.connect(self.image_changed)
self.canvas.undo_redo_state_changed.connect(self.undo_redo_state_changed)
self.canvas.selection_state_changed.connect(self.update_clipboard_actions)
@dynamic_property
def is_modified(self):
def fget(self):
return self._is_modified
def fset(self, val):
self._is_modified = val
self.modification_state_changed.emit(val)
return property(fget=fget, fset=fset)
@property
def undo_available(self):
return self.canvas.undo_action.isEnabled()
@property
def redo_available(self):
return self.canvas.redo_action.isEnabled()
@dynamic_property
def current_line(self):
def fget(self):
return 0
def fset(self, val):
pass
return property(fget=fget, fset=fset)
@property
def number_of_lines(self):
return 0
def pretty_print(self, name):
return False
def change_document_name(self, newname):
pass
def get_raw_data(self):
return self.canvas.get_image_data(quality=self.quality)
@dynamic_property
def data(self):
def fget(self):
return self.get_raw_data()
def fset(self, val):
self.canvas.load_image(val)
return property(fget=fget, fset=fset)
def replace_data(self, raw, only_if_different=True):
# We ignore only_if_different as it is useless in our case, and
# there is no easy way to check two images for equality
self.data = raw
def apply_settings(self, prefs=None, dictionaries_changed=False):
pass
def go_to_line(self, *args, **kwargs):
pass
def save_state(self):
for bar in self.bars:
if bar.isFloating():
return
tprefs['image-editor-state'] = bytearray(self.saveState())
def restore_state(self):
state = tprefs.get('image-editor-state', None)
if state is not None:
self.restoreState(state)
def set_focus(self):
self.canvas.setFocus(Qt.OtherFocusReason)
def undo(self):
self.canvas.undo_action.trigger()
def redo(self):
self.canvas.redo_action.trigger()
def copy(self):
self.canvas.copy()
def cut(self):
return error_dialog(self, _('Not allowed'), _(
'Cutting of images is not allowed. If you want to delete the image, use'
' the files browser to do it.'), show=True)
def paste(self):
self.canvas.paste()
# Search and replace {{{
def mark_selected_text(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
return False
def replace(self, *args, **kwargs):
return False
def all_in_marked(self, *args, **kwargs):
return 0
@property
def selected_text(self):
return ''
# }}}
def image_changed(self, new_image):
self.is_synced_to_container = False
self._is_modified = True
self.copy_available = self.canvas.is_valid
self.copy_available_state_changed.emit(self.copy_available)
self.data_changed.emit(self)
self.modification_state_changed.emit(True)
self.fmt_label.setText(' ' + (self.canvas.original_image_format or '').upper())
im = self.canvas.current_image
self.size_label.setText('{0} x {1}{2}'.format(im.width(), im.height(), ' px'))
def break_cycles(self):
self.canvas.break_cycles()
self.canvas.image_changed.disconnect()
self.canvas.undo_redo_state_changed.disconnect()
self.canvas.selection_state_changed.disconnect()
self.modification_state_changed.disconnect()
self.undo_redo_state_changed.disconnect()
self.data_changed.disconnect()
self.cursor_position_changed.disconnect()
self.copy_available_state_changed.disconnect()
def contextMenuEvent(self, ev):
ev.ignore()
def create_toolbars(self):
self.action_bar = b = self.addToolBar(_('File actions tool bar'))
b.setObjectName('action_bar') # Needed for saveState
for x in ('undo', 'redo'):
b.addAction(getattr(self.canvas, '%s_action' % x))
self.edit_bar = b = self.addToolBar(_('Edit actions tool bar'))
b.setObjectName('edit-actions-bar')
for x in ('copy', 'paste'):
ac = actions['editor-%s' % x]
setattr(self, 'action_' + x, b.addAction(ac.icon(), x, getattr(self, x)))
self.update_clipboard_actions()
b.addSeparator()
self.action_trim = ac = b.addAction(QIcon(I('trim.png')), _('Trim image'), self.canvas.trim_image)
self.action_rotate = ac = b.addAction(QIcon(I('rotate-right.png')), _('Rotate image'), self.canvas.rotate_image)
self.action_resize = ac = b.addAction(QIcon(I('resize.png')), _('Resize image'), self.resize_image)
b.addSeparator()
self.action_filters = ac = b.addAction(QIcon(I('filter.png')), _('Image filters'))
b.widgetForAction(ac).setPopupMode(QToolButton.InstantPopup)
self.filters_menu = m = QMenu()
ac.setMenu(m)
m.addAction(_('Auto-trim image'), self.canvas.autotrim_image)
m.addAction(_('Sharpen image'), self.sharpen_image)
m.addAction(_('Blur image'), self.blur_image)
m.addAction(_('De-speckle image'), self.canvas.despeckle_image)
self.info_bar = b = self.addToolBar(_('Image information bar'))
b.setObjectName('image_info_bar')
self.fmt_label = QLabel('')
b.addWidget(self.fmt_label)
b.addSeparator()
self.size_label = QLabel('')
b.addWidget(self.size_label)
self.bars = [self.action_bar, self.edit_bar, self.info_bar]
for x in self.bars:
x.setFloatable(False)
x.topLevelChanged.connect(self.toolbar_floated)
self.restore_state()
def toolbar_floated(self, floating):
if not floating:
self.save_state()
for ed in editors.itervalues():
if ed is not self:
ed.restore_state()
def update_clipboard_actions(self, *args):
if self.canvas.has_selection:
self.action_copy.setText(_('Copy selected region'))
self.action_paste.setText(_('Paste into selected region'))
else:
self.action_copy.setText(_('Copy image'))
self.action_paste.setText(_('Paste image'))
def resize_image(self):
im = self.canvas.current_image
d = ResizeDialog(im.width(), im.height(), self)
if d.exec_() == d.Accepted:
self.canvas.resize_image(d.width, d.height)
def sharpen_image(self):
val, ok = QInputDialog.getInt(self, _('Sharpen image'), _(
'The standard deviation for the Gaussian sharpen operation (higher means more sharpening)'), value=3, min=1, max=20)
if ok:
self.canvas.sharpen_image(sigma=val)
def blur_image(self):
val, ok = QInputDialog.getInt(self, _('Blur image'), _(
'The standard deviation for the Gaussian blur operation (higher means more blurring)'), value=3, min=1, max=20)
if ok:
self.canvas.blur_image(sigma=val)
def launch_editor(path_to_edit, path_is_raw=False):
app = QApplication([])
if path_is_raw:
raw = path_to_edit
else:
with open(path_to_edit, 'rb') as f:
raw = f.read()
t = Editor('raster_image')
t.data = raw
t.show()
app.exec_()
if __name__ == '__main__':
launch_editor(sys.argv[-1])
|
sharad/calibre
|
src/calibre/gui2/tweak_book/editor/image.py
|
Python
|
gpl-3.0
| 11,020
|
[
"Gaussian"
] |
586fe7d38b29668f7ab0d163c0c3805f31a5645e50c8be0d11ae51c55afd5249
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.orchestration.airflow.service_v1beta1.services.image_versions import (
ImageVersionsAsyncClient,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.image_versions import (
ImageVersionsClient,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.image_versions import (
pagers,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.image_versions import (
transports,
)
from google.cloud.orchestration.airflow.service_v1beta1.types import image_versions
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ImageVersionsClient._get_default_mtls_endpoint(None) is None
assert (
ImageVersionsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ImageVersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ImageVersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageVersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageVersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [ImageVersionsClient, ImageVersionsAsyncClient,]
)
def test_image_versions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.ImageVersionsGrpcTransport, "grpc"),
(transports.ImageVersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_image_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [ImageVersionsClient, ImageVersionsAsyncClient,]
)
def test_image_versions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
def test_image_versions_client_get_transport_class():
transport = ImageVersionsClient.get_transport_class()
available_transports = [
transports.ImageVersionsGrpcTransport,
]
assert transport in available_transports
transport = ImageVersionsClient.get_transport_class("grpc")
assert transport == transports.ImageVersionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageVersionsClient, transports.ImageVersionsGrpcTransport, "grpc"),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ImageVersionsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsClient),
)
@mock.patch.object(
ImageVersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsAsyncClient),
)
def test_image_versions_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ImageVersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ImageVersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ImageVersionsClient, transports.ImageVersionsGrpcTransport, "grpc", "true"),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ImageVersionsClient, transports.ImageVersionsGrpcTransport, "grpc", "false"),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ImageVersionsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsClient),
)
@mock.patch.object(
ImageVersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_image_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [ImageVersionsClient, ImageVersionsAsyncClient]
)
@mock.patch.object(
ImageVersionsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsClient),
)
@mock.patch.object(
ImageVersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageVersionsAsyncClient),
)
def test_image_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageVersionsClient, transports.ImageVersionsGrpcTransport, "grpc"),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ImageVersionsClient,
transports.ImageVersionsGrpcTransport,
"grpc",
grpc_helpers,
),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_image_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_image_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.image_versions.transports.ImageVersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = ImageVersionsClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ImageVersionsClient,
transports.ImageVersionsGrpcTransport,
"grpc",
grpc_helpers,
),
(
ImageVersionsAsyncClient,
transports.ImageVersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_image_versions_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"composer.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="composer.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [image_versions.ListImageVersionsRequest, dict,]
)
def test_list_image_versions(request_type, transport: str = "grpc"):
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_versions.ListImageVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_image_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_versions.ListImageVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListImageVersionsPager)
assert response.next_page_token == "next_page_token_value"
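# Illustrative usage sketch (not part of the generated tests): outside of a
# mocked call, callers would typically iterate the pager returned by
# list_image_versions, which fetches subsequent pages transparently. The
# resource name and the field accessed here are assumed for illustration only:
#
#     client = ImageVersionsClient()
#     parent = "projects/my-project/locations/us-central1"
#     for image_version in client.list_image_versions(parent=parent):
#         print(image_version.image_version_id)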
def test_list_image_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
client.list_image_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_versions.ListImageVersionsRequest()
@pytest.mark.asyncio
async def test_list_image_versions_async(
transport: str = "grpc_asyncio",
request_type=image_versions.ListImageVersionsRequest,
):
client = ImageVersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_versions.ListImageVersionsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_image_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_versions.ListImageVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListImageVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_image_versions_async_from_dict():
await test_list_image_versions_async(request_type=dict)
def test_list_image_versions_field_headers():
client = ImageVersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = image_versions.ListImageVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
call.return_value = image_versions.ListImageVersionsResponse()
client.list_image_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_image_versions_field_headers_async():
client = ImageVersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = image_versions.ListImageVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_versions.ListImageVersionsResponse()
)
await client.list_image_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_image_versions_flattened():
client = ImageVersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_versions.ListImageVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_image_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_image_versions_flattened_error():
client = ImageVersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_image_versions(
image_versions.ListImageVersionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_image_versions_flattened_async():
client = ImageVersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_versions.ListImageVersionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_image_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_image_versions_flattened_error_async():
client = ImageVersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_image_versions(
image_versions.ListImageVersionsRequest(), parent="parent_value",
)
def test_list_image_versions_pager(transport_name: str = "grpc"):
client = ImageVersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
next_page_token="abc",
),
image_versions.ListImageVersionsResponse(
image_versions=[], next_page_token="def",
),
image_versions.ListImageVersionsResponse(
image_versions=[image_versions.ImageVersion(),], next_page_token="ghi",
),
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
),
RuntimeError,
)
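        # The mocked pages above hold 3 + 0 + 1 + 2 = 6 ImageVersion messages,
        # which is exactly what the flattened iteration below is expected to yield.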
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_image_versions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, image_versions.ImageVersion) for i in results)
def test_list_image_versions_pages(transport_name: str = "grpc"):
client = ImageVersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
next_page_token="abc",
),
image_versions.ListImageVersionsResponse(
image_versions=[], next_page_token="def",
),
image_versions.ListImageVersionsResponse(
image_versions=[image_versions.ImageVersion(),], next_page_token="ghi",
),
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
),
RuntimeError,
)
pages = list(client.list_image_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_image_versions_async_pager():
    client = ImageVersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
next_page_token="abc",
),
image_versions.ListImageVersionsResponse(
image_versions=[], next_page_token="def",
),
image_versions.ListImageVersionsResponse(
image_versions=[image_versions.ImageVersion(),], next_page_token="ghi",
),
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
),
RuntimeError,
)
async_pager = await client.list_image_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, image_versions.ImageVersion) for i in responses)
@pytest.mark.asyncio
async def test_list_image_versions_async_pages():
    client = ImageVersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_image_versions),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
next_page_token="abc",
),
image_versions.ListImageVersionsResponse(
image_versions=[], next_page_token="def",
),
image_versions.ListImageVersionsResponse(
image_versions=[image_versions.ImageVersion(),], next_page_token="ghi",
),
image_versions.ListImageVersionsResponse(
image_versions=[
image_versions.ImageVersion(),
image_versions.ImageVersion(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_image_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageVersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageVersionsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageVersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageVersionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ImageVersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageVersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ImageVersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageVersionsGrpcTransport,
transports.ImageVersionsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ImageVersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ImageVersionsGrpcTransport,)
def test_image_versions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ImageVersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_image_versions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.image_versions.transports.ImageVersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ImageVersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("list_image_versions",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_image_versions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.image_versions.transports.ImageVersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageVersionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_image_versions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.image_versions.transports.ImageVersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageVersionsTransport()
adc.assert_called_once()
def test_image_versions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ImageVersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageVersionsGrpcTransport,
transports.ImageVersionsGrpcAsyncIOTransport,
],
)
def test_image_versions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ImageVersionsGrpcTransport, grpc_helpers),
(transports.ImageVersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_image_versions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"composer.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="composer.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageVersionsGrpcTransport,
transports.ImageVersionsGrpcAsyncIOTransport,
],
)
def test_image_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_image_versions_host_no_port():
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com"
),
)
assert client.transport._host == "composer.googleapis.com:443"
def test_image_versions_host_with_port():
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com:8000"
),
)
assert client.transport._host == "composer.googleapis.com:8000"
def test_image_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageVersionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_image_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageVersionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageVersionsGrpcTransport,
transports.ImageVersionsGrpcAsyncIOTransport,
],
)
def test_image_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageVersionsGrpcTransport,
transports.ImageVersionsGrpcAsyncIOTransport,
],
)
def test_image_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ImageVersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ImageVersionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ImageVersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ImageVersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ImageVersionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ImageVersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ImageVersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ImageVersionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ImageVersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ImageVersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ImageVersionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ImageVersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ImageVersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ImageVersionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ImageVersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ImageVersionsTransport, "_prep_wrapped_messages"
) as prep:
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ImageVersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ImageVersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = ImageVersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = ImageVersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(ImageVersionsClient, transports.ImageVersionsGrpcTransport),
(ImageVersionsAsyncClient, transports.ImageVersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
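# A minimal way to run this suite locally (assuming pytest, pytest-asyncio and the
# generated google-cloud-orchestration-airflow client library are installed) would be:
#
#     pytest tests/unit/gapic/service_v1beta1/test_image_versions.py -q
#
# The exact package layout is an assumption; adjust the path to match your checkout.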
|
googleapis/python-orchestration-airflow
|
tests/unit/gapic/service_v1beta1/test_image_versions.py
|
Python
|
apache-2.0
| 61,434
|
[
"Octopus"
] |
13f1efbb451de4b28d1d99272a032e44cd4cdbe8586923aa24517c1332de4d5f
|
"""
Visitor to generate a Graphviz .dot file with an AST representation.
"""
from pydot import pydot
import minivisitor
class GraphvizGenerator(minivisitor.PrintTree):
"""
Render a minivect AST as a graphviz tree.
"""
def __init__(self, context, name, node_color=None, edge_color=None,
node_fontcolor=None, edge_fontcolor=None):
super(GraphvizGenerator, self).__init__(context)
self.name = name
self.counter = 0
self.node_color = node_color
self.edge_color = edge_color
self.node_fontcolor = node_fontcolor
self.edge_fontcolor = edge_fontcolor
def create_node(self, node):
"Create a graphviz node from the miniast node"
label = '"%s"' % self.format_node(node, want_type_info=False)
self.counter += 1
pydot_node = pydot.Node(str(self.counter), label=label)
self.add_node(pydot_node)
return pydot_node
def add_node(self, pydot_node):
"Add a pydot node to the graph and set its colors"
if self.node_color is not None:
pydot_node.set_color(self.node_color)
if self.node_fontcolor is not None:
pydot_node.set_fontcolor(self.node_fontcolor)
self.graph.add_node(pydot_node)
def add_edge(self, source, dest, edge_label=None):
"Add an edge between two pydot nodes and set the colors"
edge = pydot.Edge(source, dest)
if edge_label is not None:
edge.set_label(edge_label)
if self.edge_color is not None:
edge.set_color(self.edge_color)
if self.edge_fontcolor is not None:
edge.set_fontcolor(self.edge_fontcolor)
self.graph.add_edge(edge)
def visit_Node(self, node, pydot_node=None):
"Visit children and add edges to their Graphviz nodes."
if pydot_node is None:
pydot_node = self.create_node(node)
nodes_dict = self.visitchildren(node)
attrs = self.context.getchildren(node)
for attr in attrs:
values = nodes_dict.get(attr, None)
if values is not None:
if isinstance(values, list):
for value in values:
self.add_edge(pydot_node, value)
else:
self.add_edge(pydot_node, values, attr)
return pydot_node
def visit_FunctionNode(self, node):
"Create a graphviz graph"
self.graph = pydot.Dot(self.name, graph_type='digraph')
pydot_function = self.create_node(node)
pydot_body = self.visit(node.body)
# Create artificial arguments for brevity
pydot_args = pydot.Node("Arguments (omitted)")
self.add_node(pydot_args)
self.add_edge(pydot_function, pydot_body)
self.add_edge(pydot_function, pydot_args)
return self.graph
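# Illustrative usage sketch (not part of the original module; the minivect entry
# points named here are assumptions): given a minivect ``context`` and a
# ``FunctionNode`` ast, something along the lines of
#
#     gen = GraphvizGenerator(context, "minivect_ast", node_color="lightblue")
#     graph = gen.visit(ast)            # returns the pydot.Dot built above
#     graph.write("minivect_ast.dot")   # pydot.Dot also offers write_png(), etc.
#
# would dump the AST to a Graphviz .dot file.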
|
markflorisson/minivect
|
minivect/graphviz.py
|
Python
|
bsd-2-clause
| 2,875
|
[
"VisIt"
] |
12d5cd36d99c6c7da61c5c2b8273d07c930f948362c7f90caf2301f8b3d85dd8
|
""" StorageManagementDB is a front end to the Stager Database.
    There are four tables in the StorageManagementDB: Tasks, CacheReplicas, TaskReplicas and StageRequests.
    The Tasks table is the placeholder for the tasks that have requested files to be staged.
These can be from different systems and have different associated call back methods.
The CacheReplicas table keeps the information on all the CacheReplicas in the system.
It maps all the file information LFN, PFN, SE to an assigned ReplicaID.
The TaskReplicas table maps the TaskIDs from the Tasks table to the ReplicaID from the CacheReplicas table.
The StageRequests table contains each of the prestage request IDs for each of the replicas.
"""
__RCSID__ = "$Id$"
import inspect
import threading
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import intListToString, stringListToString
# Stage Requests are issued with a length of "PinLength"
# However, once Staged, the entry in the StageRequests table will set a PinExpiryTime of only "PinLength" / THROTTLING_STEPS
# As the PinExpiryTime arrives, StageRequests and their corresponding CacheReplicas entries are cleaned
# This allows throttling the submission of Stage Requests up to a maximum of "DiskCacheTB" per "PinLength"
# After "PinLength" / THROTTLING_STEPS seconds, entries are removed, so new requests for the same replica will trigger
# a new Stage Request to the SE, and thus an update of the Pinning on the SE.
#
# - "PinLength" is an Option of the StageRequest Agent that defaults to THROTTLING_TIME
# - "DiskCacheTB" is an Option of the StorageElement that defaults to 1 (TB)
#
THROTTLING_TIME = 86400
THROTTLING_STEPS = 12
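# Example with the defaults above: THROTTLING_TIME / THROTTLING_STEPS = 86400 / 12
# = 7200 seconds, so a staged replica's StageRequests entry expires after 2 hours
# even though the pin on the SE was requested for a full day.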
class StorageManagementDB( DB ):
def __init__( self, systemInstance = 'Default' ):
DB.__init__( self, 'StorageManagementDB', 'StorageManagement/StorageManagementDB' )
self.lock = threading.Lock()
self.TASKPARAMS = ['TaskID', 'Status', 'Source', 'SubmitTime', 'LastUpdate', 'CompleteTime', 'CallBackMethod', 'SourceTaskID']
self.REPLICAPARAMS = ['ReplicaID', 'Type', 'Status', 'SE', 'LFN', 'PFN', 'Size', 'FileChecksum', 'GUID', 'SubmitTime', 'LastUpdate', 'Reason', 'Links']
self.STAGEPARAMS = ['ReplicaID', 'StageStatus', 'RequestID', 'StageRequestSubmitTime', 'StageRequestCompletedTime', 'PinLength', 'PinExpiryTime']
self.STATES = ['Failed', 'New', 'Waiting', 'Offline', 'StageSubmitted', 'Staged']
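    # Note: the order of STATES matters; _updateTasksForReplica() assigns a task the
    # first state of this list that appears among its cache replicas (so 'Failed'
    # takes precedence over 'New', 'New' over 'Waiting', and so on).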
def __getConnection( self, connection ):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def _caller( self ):
return inspect.stack()[2][3]
################################################################
#
# State machine management
#
def updateTaskStatus( self, taskIDs, newTaskStatus, connection = False ):
return self.__updateTaskStatus( taskIDs, newTaskStatus, connection = connection )
def __updateTaskStatus( self, taskIDs, newTaskStatus, force = False, connection = False ):
connection = self.__getConnection( connection )
if not taskIDs:
return S_OK( taskIDs )
if force:
toUpdate = taskIDs
else:
res = self._checkTaskUpdate( taskIDs, newTaskStatus, connection = connection )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
reqSelect = "SELECT TaskID FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), '__updateTaskStatus' ),
"%s. %s" % ( reqSelect, resSelect['Message'] ) )
req = "UPDATE Tasks SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE TaskID IN (%s) AND Status != '%s';" % ( newTaskStatus, intListToString( toUpdate ), newTaskStatus )
res = self._update( req, connection )
if not res['OK']:
return res
taskIDs = []
for record in resSelect['Value']:
taskIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
if len( taskIDs ) > 0:
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1["OK"]:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), '__updateTaskStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
return S_OK( toUpdate )
def _checkTaskUpdate( self, taskIDs, newTaskState, connection = False ):
connection = self.__getConnection( connection )
if not taskIDs:
return S_OK( taskIDs )
# * -> Failed
if newTaskState == 'Failed':
oldTaskState = []
# StageCompleting -> Done
elif newTaskState == 'Done':
oldTaskState = ['StageCompleting']
# StageSubmitted -> StageCompleting
elif newTaskState == 'StageCompleting':
oldTaskState = ['StageSubmitted']
# Waiting -> StageSubmitted
elif newTaskState == 'StageSubmitted':
oldTaskState = ['Waiting', 'Offline']
# New -> Waiting
elif newTaskState == 'Waiting':
oldTaskState = ['New']
elif newTaskState == 'Offline':
oldTaskState = ['Waiting']
else:
return S_ERROR( "Task status not recognized" )
if not oldTaskState:
toUpdate = taskIDs
else:
req = "SELECT TaskID FROM Tasks WHERE Status in (%s) AND TaskID IN (%s)" % ( stringListToString( oldTaskState ), intListToString( taskIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def updateReplicaStatus( self, replicaIDs, newReplicaStatus, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
res = self._checkReplicaUpdate( replicaIDs, newReplicaStatus )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), 'updateReplicaStatus' ),
"%s. %s" % ( reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newReplicaStatus, intListToString( toUpdate ), newReplicaStatus )
res = self._update( req, connection )
if not res['OK']:
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
if len( replicaIDs ) > 0:
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
res = self._updateTasksForReplica( replicaIDs, connection = connection )
if not res['OK']:
return res
return S_OK( toUpdate )
def _updateTasksForReplica( self, replicaIDs, connection = False ):
tasksInStatus = {}
for state in self.STATES:
tasksInStatus[state] = []
req = "SELECT T.TaskID,T.Status FROM Tasks AS T, TaskReplicas AS R WHERE R.ReplicaID IN ( %s ) AND R.TaskID = T.TaskID GROUP BY T.TaskID;" % intListToString( replicaIDs )
res = self._query( req, connection )
if not res['OK']:
return res
for taskId, status in res['Value']:
subreq = "SELECT DISTINCT(C.Status) FROM TaskReplicas AS R, CacheReplicas AS C WHERE R.TaskID=%s AND R.ReplicaID = C.ReplicaID;" % taskId
subres = self._query( subreq, connection )
if not subres['OK']:
return subres
cacheStatesForTask = [row[0] for row in subres['Value']]
if not cacheStatesForTask:
tasksInStatus['Failed'].append( taskId )
continue
wrongState = False
for state in cacheStatesForTask:
if state not in self.STATES:
wrongState = True
break
if wrongState:
tasksInStatus['Failed'].append( taskId )
continue
for state in self.STATES:
if state in cacheStatesForTask:
if status != state:
tasksInStatus[state].append( taskId )
break
for newStatus in tasksInStatus:
if tasksInStatus[newStatus]:
res = self.__updateTaskStatus( tasksInStatus[newStatus], newStatus, True, connection = connection )
if not res['OK']:
gLogger.warn( "Failed to update task associated to replicas", res['Message'] )
# return res
return S_OK( tasksInStatus )
def getAssociatedReplicas( self, replicaIDs ):
""" Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = self._getReplicaIDTasks( replicaIDs )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getAssociatedReplicas: Failed to get Tasks.', res['Message'] )
return res
taskIDs = res['Value']
return self.getCacheReplicas( {'TaskID':taskIDs} )
def _checkReplicaUpdate( self, replicaIDs, newReplicaState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newReplicaState == 'Failed':
oldReplicaState = []
# New -> Waiting
elif newReplicaState == 'Waiting':
oldReplicaState = ['New']
# Waiting -> StageSubmitted
elif newReplicaState == 'StageSubmitted':
oldReplicaState = ['Waiting', 'Offline']
# StageSubmitted -> Staged
elif newReplicaState == 'Staged':
oldReplicaState = ['StageSubmitted']
elif newReplicaState == 'Offline':
oldReplicaState = ['Waiting']
else:
return S_ERROR( "Replica status not recognized" )
if not oldReplicaState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM CacheReplicas WHERE Status IN (%s) AND ReplicaID IN (%s)" % ( stringListToString( oldReplicaState ), intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def __getTaskStateFromReplicaState( self, replicaState ):
# For the moment the task state just references to the replicaState
return replicaState
def updateStageRequestStatus( self, replicaIDs, newStageStatus, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
res = self._checkStageUpdate( replicaIDs, newStageStatus, connection = connection )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "Select * FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newStageStatus )
reqSelect = "Select ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newStageStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newStageStatus, intListToString( toUpdate ), newStageStatus )
res = self._update( req, connection )
if not res['OK']:
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
# Now update the replicas associated to the replicaIDs
newReplicaStatus = self.__getReplicaStateFromStageState( newStageStatus )
res = self.updateReplicaStatus( toUpdate, newReplicaStatus, connection = connection )
if not res['OK']:
gLogger.warn( "Failed to update cache replicas associated to stage requests", res['Message'] )
return S_OK( toUpdate )
def _checkStageUpdate( self, replicaIDs, newStageState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newStageState == 'Failed':
oldStageState = []
elif newStageState == 'Staged':
oldStageState = ['StageSubmitted']
else:
return S_ERROR( "StageRequest status not recognized" )
if not oldStageState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM StageRequests WHERE StageStatus = '%s' AND ReplicaID IN (%s)" % ( oldStageState, intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def __getReplicaStateFromStageState( self, stageState ):
# For the moment the replica state just references to the stage state
return stageState
#
# End of state machine management
#
################################################################
################################################################
#
# Monitoring of stage tasks
#
def getTaskStatus( self, taskID, connection = False ):
""" Obtain the task status from the Tasks table. """
connection = self.__getConnection( connection )
res = self.getTaskInfo( taskID, connection = connection )
if not res['OK']:
return res
taskInfo = res['Value'][taskID]
return S_OK( taskInfo['Status'] )
def getTaskInfo( self, taskID, connection = False ):
""" Obtain all the information from the Tasks table for a supplied task. """
connection = self.__getConnection( connection )
req = "SELECT TaskID,Status,Source,SubmitTime,CompleteTime,CallBackMethod,SourceTaskID from Tasks WHERE TaskID IN (%s);" % intListToString( taskID )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getTaskInfo: Failed to get task information.', res['Message'] )
return res
resDict = {}
for taskID, status, source, submitTime, completeTime, callBackMethod, sourceTaskID in res['Value']:
resDict[sourceTaskID] = {'Status':status, 'Source':source, 'SubmitTime':submitTime, 'CompleteTime':completeTime, 'CallBackMethod':callBackMethod, 'SourceTaskID':sourceTaskID}
if not resDict:
gLogger.error( 'StorageManagementDB.getTaskInfo: The supplied task did not exist', taskID )
return S_ERROR( 'The supplied task %s did not exist' % taskID )
return S_OK( resDict )
def _getTaskIDForJob ( self, jobID, connection = False ):
# Stager taskID is retrieved from the source DIRAC jobID
connection = self.__getConnection( connection )
req = "SELECT TaskID from Tasks WHERE SourceTaskID=%s;" % int( jobID )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), '_getTaskIDForJob' ),
"%s. %s" % ( req, res['Message'] ) )
return S_ERROR( 'The supplied JobID does not exist!' )
taskID = [ row[0] for row in res['Value'] ]
return S_OK( taskID )
def getTaskSummary( self, jobID, connection = False ):
""" Obtain the task summary from the database. """
connection = self.__getConnection( connection )
res = self._getTaskIDForJob( jobID, connection = connection )
if not res['OK']:
return res
if res['Value']:
taskID = res['Value']
else:
return S_OK()
res = self.getTaskInfo( taskID, connection = connection )
if not res['OK']:
return res
taskInfo = res['Value']
req = "SELECT R.LFN,R.SE,R.PFN,R.Size,R.Status,R.LastUpdate,R.Reason FROM CacheReplicas AS R, TaskReplicas AS TR WHERE TR.TaskID in (%s) AND TR.ReplicaID=R.ReplicaID;" % intListToString( taskID )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getTaskSummary: Failed to get Replica summary for task.', res['Message'] )
return res
replicaInfo = {}
for lfn, storageElement, pfn, fileSize, status, lastupdate, reason in res['Value']:
replicaInfo[lfn] = {'StorageElement':storageElement, 'PFN':pfn, 'FileSize':fileSize,
'Status':status, 'LastUpdate':lastupdate, 'Reason':reason}
resDict = {'TaskInfo':taskInfo, 'ReplicaInfo':replicaInfo}
return S_OK( resDict )
def getTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'SubmitTime', orderAttribute = None,
limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM Tasks" % ( intListToString( self.TASKPARAMS ) )
if condDict or older or newer:
if 'ReplicaID' in condDict:
replicaIDs = condDict.pop( 'ReplicaID' )
if not isinstance( replicaIDs, ( list, tuple ) ):
replicaIDs = [replicaIDs]
res = self._getReplicaIDTasks( replicaIDs, connection = connection )
if not res['OK']:
return res
condDict['TaskID'] = res['Value']
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
tasks = res['Value']
resultDict = {}
for row in tasks:
resultDict[row[0]] = dict( zip( self.TASKPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = tasks
result['ParameterNames'] = self.TASKPARAMS
return result
def getCacheReplicas( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate', orderAttribute = None, limit = None, connection = False ):
""" Get cache replicas for the supplied selection with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM CacheReplicas" % ( intListToString( self.REPLICAPARAMS ) )
if condDict or older or newer:
if 'TaskID' in condDict:
taskIDs = condDict.pop( 'TaskID' )
if not isinstance( taskIDs, ( list, tuple ) ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
      # BUG: limit is ignored unless there is a nonempty condition dictionary OR older OR newer is nonempty
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
cacheReplicas = res['Value']
resultDict = {}
for row in cacheReplicas:
resultDict[row[0]] = dict( zip( self.REPLICAPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = cacheReplicas
result['ParameterNames'] = self.REPLICAPARAMS
return result
def getStageRequests( self, condDict = {}, older = None, newer = None, timeStamp = 'StageRequestSubmitTime', orderAttribute = None, limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM StageRequests" % ( intListToString( self.STAGEPARAMS ) )
if condDict or older or newer:
if 'TaskID' in condDict:
taskIDs = condDict.pop( 'TaskID' )
if not isinstance( taskIDs, ( list, tuple ) ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
stageRequests = res['Value']
resultDict = {}
for row in stageRequests:
resultDict[row[0]] = dict( zip( self.STAGEPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = stageRequests
result['ParameterNames'] = self.STAGEPARAMS
return result
def _getTaskReplicaIDs( self, taskIDs, connection = False ):
if not taskIDs:
return S_OK( [] )
req = "SELECT DISTINCT(ReplicaID) FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._query( req, connection )
if not res['OK']:
return res
replicaIDs = [row[0] for row in res['Value']]
return S_OK( replicaIDs )
def _getReplicaIDTasks( self, replicaIDs, connection = False ):
if not replicaIDs:
return S_OK( [] )
req = "SELECT DISTINCT(TaskID) FROM TaskReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
res = self._query( req, connection )
if not res['OK']:
return res
taskIDs = [row[0] for row in res['Value']]
return S_OK( taskIDs )
#
# End of monitoring of stage tasks
#
################################################################
####################################################################
#
# Submission of stage requests
#
def setRequest( self, lfnDict, source, callbackMethod, sourceTaskID, connection = False ):
""" This method populates the StorageManagementDB Tasks table with the requested files. """
connection = self.__getConnection( connection )
if not lfnDict:
return S_ERROR( "No files supplied in request" )
# The first step is to create the task in the Tasks table
res = self._createTask( source, callbackMethod, sourceTaskID, connection = connection )
if not res['OK']:
return res
taskID = res['Value']
# Get the Replicas which already exist in the CacheReplicas table
allReplicaIDs = []
taskStates = []
for se, lfns in lfnDict.iteritems():
if isinstance( lfns, basestring ):
lfns = [lfns]
res = self._getExistingReplicas( se, lfns, connection = connection )
if not res['OK']:
return res
existingReplicas = res['Value']
# Insert the CacheReplicas that do not already exist
for lfn in lfns:
if lfn in existingReplicas:
gLogger.verbose( 'StorageManagementDB.setRequest: Replica already exists in CacheReplicas table %s @ %s' % ( lfn, se ) )
existingFileState = existingReplicas[lfn][1]
taskState = self.__getTaskStateFromReplicaState( existingFileState )
else:
res = self._insertReplicaInformation( lfn, se, 'Stage', connection = connection )
if not res['OK']:
self._cleanTask( taskID, connection = connection )
return res
existingReplicas[lfn] = ( res['Value'], 'New' )
newFileState = existingReplicas[lfn][1]
taskState = self.__getTaskStateFromReplicaState( newFileState )
if not taskState in taskStates:
taskStates.append( taskState )
allReplicaIDs.extend( existingReplicas.values() )
# Insert all the replicas into the TaskReplicas table
res = self._insertTaskReplicaInformation( taskID, allReplicaIDs, connection = connection )
if not res['OK']:
self._cleanTask( taskID, connection = connection )
return res
    # Check whether the task status is Done based on the existing file states
# If all the files for a particular Task are 'Staged', update the Task
if taskStates == ['Staged']:
# so if the tasks are for LFNs from the lfns dictionary, which are already staged,
# they immediately change state New->Done. Fixed it to translate such tasks to 'Staged' state
self.__updateTaskStatus( [taskID], 'Staged', True, connection = connection )
if 'Failed' in taskStates:
self.__updateTaskStatus( [taskID], 'Failed', True, connection = connection )
return S_OK( taskID )
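  # Illustrative call sketch (the SE name, LFNs and callback below are assumed, not
  # taken from the original source): a client would typically populate a task with
  #     db.setRequest( { 'CERN-RAW' : [ '/lhcb/data/file1', '/lhcb/data/file2' ] },
  #                    'WorkloadManagement', 'updateJobFromStager', jobID )
  # creating one Tasks row, one CacheReplicas row per new (SE, LFN) pair and the
  # linking TaskReplicas rows.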
def _cleanTask( self, taskID, connection = False ):
""" Remove a task and any related information """
connection = self.__getConnection( connection )
self.removeTasks( [taskID], connection = connection )
self.removeUnlinkedReplicas( connection = connection )
def _createTask( self, source, callbackMethod, sourceTaskID, connection = False ):
""" Enter the task details into the Tasks table """
connection = self.__getConnection( connection )
req = "INSERT INTO Tasks (Source,SubmitTime,CallBackMethod,SourceTaskID) VALUES ('%s',UTC_TIMESTAMP(),'%s','%s');" % ( source, callbackMethod, sourceTaskID )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB._createTask: Failed to create task.", res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_createTask',req))
taskID = res['lastRowId']
reqSelect = "SELECT * FROM Tasks WHERE TaskID = %s;" % ( taskID )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.info( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_createTask', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted Tasks = %s" % ( self._caller(), '_createTask', resSelect['Value'][0] ) )
# gLogger.info("StorageManagementDB._createTask: Created task with ('%s','%s','%s') and obtained TaskID %s" % (source,callbackMethod,sourceTaskID,taskID))
return S_OK( taskID )
def _getExistingReplicas( self, storageElement, lfns, connection = False ):
""" Obtains the ReplicasIDs for the replicas already entered in the CacheReplicas table """
connection = self.__getConnection( connection )
req = "SELECT ReplicaID,LFN,Status FROM CacheReplicas WHERE SE = '%s' AND LFN IN (%s);" % ( storageElement, stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._getExistingReplicas: Failed to get existing replicas.', res['Message'] )
return res
existingReplicas = {}
for replicaID, lfn, status in res['Value']:
existingReplicas[lfn] = ( replicaID, status )
return S_OK( existingReplicas )
def _insertReplicaInformation( self, lfn, storageElement, rType, connection = False ):
""" Enter the replica into the CacheReplicas table """
connection = self.__getConnection( connection )
req = "INSERT INTO CacheReplicas (Type,SE,LFN,PFN,Size,FileChecksum,GUID,SubmitTime,LastUpdate) VALUES ('%s','%s','%s','',0,'','',UTC_TIMESTAMP(),UTC_TIMESTAMP());" % ( rType, storageElement, lfn )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "_insertReplicaInformation: Failed to insert to CacheReplicas table.", res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_insertReplicaInformation',req))
replicaID = res['lastRowId']
reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID = %s;" % ( replicaID )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_insertReplicaInformation', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted CacheReplicas = %s" % ( self._caller(), '_insertReplicaInformation', resSelect['Value'][0] ) )
# gLogger.verbose("_insertReplicaInformation: Inserted Replica ('%s','%s') and obtained ReplicaID %s" % (lfn,storageElement,replicaID))
return S_OK( replicaID )
def _insertTaskReplicaInformation( self, taskID, replicaIDs, connection = False ):
""" Enter the replicas into TaskReplicas table """
connection = self.__getConnection( connection )
req = "INSERT INTO TaskReplicas (TaskID,ReplicaID) VALUES "
for replicaID, _status in replicaIDs:
replicaString = "(%s,%s)," % ( taskID, replicaID )
req = "%s %s" % ( req, replicaString )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._insertTaskReplicaInformation: Failed to insert to TaskReplicas table.', res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_insertTaskReplicaInformation',req))
gLogger.verbose( "StorageManagementDB._insertTaskReplicaInformation: Successfully added %s CacheReplicas to Task %s." % ( res['Value'], taskID ) )
return S_OK()
#
# End of insertion methods
#
################################################################
####################################################################
def getStagedReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getStagedReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in goodTasks:
continue
elif status in ( 'Staged', 'StageSubmitted' ):
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Staged', 'TaskID':goodTasks}, connection = connection )
def getWaitingReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getWaitingReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Waiting':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Waiting', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getOfflineReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getOfflineReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Offline':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Offline', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getTasksWithStatus( self, status ):
""" This method retrieves the TaskID from the Tasks table with the supplied Status. """
req = "SELECT TaskID,Source,CallBackMethod,SourceTaskID from Tasks WHERE Status = '%s';" % status
res = self._query( req )
if not res['OK']:
return res
taskIDs = {}
for taskID, source, callback, sourceTask in res['Value']:
taskIDs[taskID] = ( source, callback, sourceTask )
return S_OK( taskIDs )
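# Hedged example (not part of the original code): the returned dictionary maps
# TaskID -> (Source, CallBackMethod, SourceTaskID), so a caller might do:
#   res = db.getTasksWithStatus( 'Staged' )
#   if res['OK']:
#     for taskID, ( source, callback, sourceTaskID ) in res['Value'].items():
#       gLogger.info( "Task %s submitted by %s" % ( taskID, source ) )
# The status string 'Staged' is just one of the task states used in this DB.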
####################################################################
#
# The state transition of the CacheReplicas from *->Failed
#
def updateReplicaFailure( self, terminalReplicaIDs ):
""" This method sets the status to Failure with the failure reason for the supplied Replicas. """
res = self.updateReplicaStatus( terminalReplicaIDs.keys(), 'Failed' )
if not res['OK']:
return res
updated = res['Value']
if not updated:
return S_OK( updated )
for replicaID in updated:
reqSelect = "Select * FROM CacheReplicas WHERE ReplicaID = %d" % ( replicaID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaFailure', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Reason = '%s' WHERE ReplicaID = %d" % ( terminalReplicaIDs[replicaID], replicaID )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.updateReplicaFailure: Failed to update replica fail reason.', res['Message'] )
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaFailure', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaFailure', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaFailure', record ) )
return S_OK( updated )
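# Hedged usage sketch (illustrative, not original code): terminalReplicaIDs maps
# ReplicaID -> failure reason, e.g.:
#   res = db.updateReplicaFailure( { 101: 'SRM_FILE_UNAVAILABLE', 102: 'Lost file' } )
# On success, res['Value'] holds the list of ReplicaIDs actually moved to 'Failed'.
# The ReplicaIDs and reasons above are placeholders.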
####################################################################
#
# The state transition of the CacheReplicas from New->Waiting
#
def updateReplicaInformation( self, replicaTuples ):
""" This method set the replica size information and pfn for the requested storage element. """
for replicaID, pfn, size in replicaTuples:
# reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID = %s and Status != 'Cancelled';" % ( replicaID )
reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID = %s and Status != 'Cancelled';" % ( replicaID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET PFN = '%s', Size = %s, Status = 'Waiting' WHERE ReplicaID = %s and Status != 'Cancelled';" % ( pfn, size, replicaID )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.updateReplicaInformation: Failed to update replica information.', res['Message'] )
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
gLogger.debug( 'StorageManagementDB.updateReplicaInformation: Successfully updated CacheReplicas record with Status=Waiting for ReplicaID %s' % ( replicaID ) )
return S_OK()
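# Hedged usage sketch (not part of the original code): replicaTuples carries
# (ReplicaID, PFN, Size) triples resolved during request preparation, e.g.:
#   res = db.updateReplicaInformation( [ ( 101, 'srm://se.example.org/path/file.raw', 123456789 ) ] )
# The PFN and size are placeholder values; the call moves the replica to 'Waiting'.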
####################################################################
#
# The state transition of the CacheReplicas from Waiting->StageSubmitted
#
def getSubmittedStagePins( self ):
# TODO: change the query to take the pin expiry time into account
req = "SELECT SE,COUNT(*),SUM(Size) from CacheReplicas WHERE Status NOT IN ('New','Waiting','Offline','Failed') GROUP BY SE;"
# req = "SELECT SE,Count(*),SUM(Size) from CacheReplicas,StageRequests WHERE Status NOT IN ('New','Waiting','Failed') and CacheReplicas.ReplicaID=StageRequests.ReplicaID and PinExpiryTime>UTC_TIMESTAMP() GROUP BY SE;"
res = self._query( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getSubmittedStagePins: Failed to obtain submitted requests.', res['Message'] )
return res
storageRequests = {}
for storageElement, replicas, totalSize in res['Value']:
storageRequests[storageElement] = {'Replicas':int( replicas ), 'TotalSize':int( totalSize )}
return S_OK( storageRequests )
def insertStageRequest( self, requestDict, pinLifeTime ):
req = "INSERT INTO StageRequests (ReplicaID,RequestID,StageRequestSubmitTime,PinLength) VALUES "
for requestID, replicaIDs in requestDict.iteritems():
for replicaID in replicaIDs:
replicaString = "(%s,'%s',UTC_TIMESTAMP(),%d)," % ( replicaID, requestID, pinLifeTime )
req = "%s %s" % ( req, replicaString )
req = req.rstrip( ',' )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.insertStageRequest: Failed to insert to StageRequests table.', res['Message'] )
return res
for requestID, replicaIDs in requestDict.iteritems():
for replicaID in replicaIDs:
# TODO: avoid issuing one SELECT per replica here
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID = %s AND RequestID = '%s';" % ( replicaID, requestID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'insertStageRequest', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted StageRequests = %s" % ( self._caller(), 'insertStageRequest', resSelect['Value'][0] ) )
# gLogger.info( "%s_DB: howmany = %s" % ('insertStageRequest',res))
# gLogger.info( "%s_DB:%s" % ('insertStageRequest',req))
gLogger.debug( "StorageManagementDB.insertStageRequest: Successfully added %s StageRequests with RequestID %s." % ( res['Value'], requestID ) )
return S_OK()
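# Hedged usage sketch (not part of the original code): requestDict maps a stager
# RequestID to the ReplicaIDs it covers, with pinLifeTime presumably in seconds, e.g.:
#   res = db.insertStageRequest( { 'req-abc123': [ 101, 102 ] }, pinLifeTime = 86400 )
# The request identifier, ReplicaIDs and the 24-hour pin lifetime are placeholder values.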
####################################################################
#
# The state transition of the CacheReplicas from StageSubmitted->Staged
#
def setStageComplete( self, replicaIDs ):
# Daniela: FIX wrong PinExpiryTime (84000->86400 seconds = 1 day)
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect, resSelect['Message'] ) )
return resSelect
req = "UPDATE StageRequests SET StageStatus='Staged',StageRequestCompletedTime = UTC_TIMESTAMP(),PinExpiryTime = DATE_ADD(UTC_TIMESTAMP(),INTERVAL ( PinLength / %s ) SECOND) WHERE ReplicaID IN (%s);" % ( THROTTLING_STEPS, intListToString( replicaIDs ) )
res = self._update( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.setStageComplete: Failed to set StageRequest completed.", res['Message'] )
return res
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_update StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
reqSelect1 = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect1, resSelect1['Message'] ) )
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
gLogger.debug( "StorageManagementDB.setStageComplete: Successfully updated %s StageRequests table with StageStatus=Staged for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
return res
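# Hedged usage sketch (illustrative only): called once the stager confirms that the
# bring-online requests have finished, e.g.:
#   res = db.setStageComplete( [ 101, 102 ] )
# Note that the pin expiry written here is PinLength / THROTTLING_STEPS seconds from
# now, as encoded in the UPDATE statement above; THROTTLING_STEPS is defined earlier
# in this module. The ReplicaIDs are placeholder values.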
def wakeupOldRequests( self, replicaIDs , retryInterval, connection = False ):
"""
get StageRequests submitted more than retryInterval hours ago that are still not staged,
delete these requests,
and reset the CacheReplicas with corresponding ReplicaIDs to Status='New'
"""
try:
retryInterval = max( retryInterval, 2 )
retryInterval = min( retryInterval, 24 )
retryInterval = int( retryInterval )
except Exception:
errorString = 'Wrong argument type'
gLogger.exception( errorString )
return S_ERROR( errorString )
if len( replicaIDs ) > 0:
req = "SELECT ReplicaID FROM StageRequests WHERE ReplicaID IN (%s) AND StageStatus='StageSubmitted' AND DATE_ADD( StageRequestSubmitTime, INTERVAL %s HOUR ) < UTC_TIMESTAMP();" % ( intListToString( replicaIDs ), retryInterval )
res = self._query( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to select old StageRequests.", res['Message'] )
return res
old_replicaIDs = [ row[0] for row in res['Value'] ]
if len( old_replicaIDs ) > 0:
req = "UPDATE CacheReplicas SET Status='New',LastUpdate = UTC_TIMESTAMP(), Reason = 'wakeupOldRequests' WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to roll CacheReplicas back to Status=New.", res['Message'] )
return res
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests. Problem removing entries from StageRequests." )
return res
return S_OK()
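# Hedged usage sketch (not part of the original code): typically invoked by a
# monitoring agent with the replicas currently in 'StageSubmitted', e.g.:
#   res = db.wakeupOldRequests( [ 101, 102, 103 ], retryInterval = 6 )
# Requests submitted more than 6 hours ago are dropped and their CacheReplicas reset
# to 'New' so that staging is retried; the ReplicaIDs are placeholders and the
# interval is clamped to the 2-24 hour range above.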
####################################################################
#
# This code handles the finalization of stage tasks
#
# Daniela: obsolete method, kept below only as a commented-out reference
'''
def updateStageCompletingTasks(self):
""" This will select all the Tasks in StageCompleting status and check whether all the associated files are Staged. """
req = "SELECT TR.TaskID,COUNT(if(R.Status NOT IN ('Staged'),1,NULL)) FROM Tasks AS T, TaskReplicas AS TR, CacheReplicas AS R WHERE T.Status='StageCompleting' AND T.TaskID=TR.TaskID AND TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID;"
res = self._query(req)
if not res['OK']:
return res
taskIDs = []
for taskID,count in res['Value']:
if int(count) == 0:
taskIDs.append(taskID)
if not taskIDs:
return S_OK(taskIDs)
req = "UPDATE Tasks SET Status = 'Staged' WHERE TaskID IN (%s);" % intListToString(taskIDs)
res = self._update(req)
if not res['OK']:
return res
return S_OK(taskIDs)
'''
def setTasksDone( self, taskIDs ):
""" This will update the status for a list of taskIDs to Done. """
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect, resSelect['Message'] ) )
req = "UPDATE Tasks SET Status = 'Done', CompleteTime = UTC_TIMESTAMP() WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.setTasksDone: Failed to set Tasks status to Done.", res['Message'] )
return res
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
# fixed: single bulk query instead of individual per-task queries
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
gLogger.debug( "StorageManagementDB.setTasksDone: Successfully updated %s Tasks with StageStatus=Done for taskIDs: %s." % ( res['Value'], taskIDs ) )
return res
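# Hedged usage sketch (illustrative only):
#   res = db.setTasksDone( [ 42, 43 ] )
# marks the listed tasks as 'Done' and stamps CompleteTime; the TaskIDs are
# placeholder values.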
def killTasksBySourceTaskID( self, sourceTaskIDs, connection = False ):
""" Given SourceTaskIDs (jobs), this will cancel further staging of files for the corresponding tasks.
The "cancel" is actually removing all stager DB records for these jobs.
Care must be taken to NOT cancel staging of files that are requested also by other tasks. """
connection = self.__getConnection( connection )
# get the TaskIDs
req = "SELECT TaskID from Tasks WHERE SourceTaskID IN (%s);" % intListToString( sourceTaskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
taskIDs = [ row[0] for row in res['Value'] ]
# ! Make sure to only cancel file staging for files with no relations with other tasks (jobs) but the killed ones
if taskIDs:
req = "SELECT DISTINCT(CR.ReplicaID) FROM TaskReplicas AS TR, CacheReplicas AS CR WHERE TR.TaskID IN (%s) AND CR.Links=1 and TR.ReplicaID=CR.ReplicaID;" % intListToString( taskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
replicaIDs = [ row[0] for row in res['Value'] ]
if replicaIDs:
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
req = "DELETE FROM CacheReplicas WHERE ReplicaID in (%s) AND Links=1;" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
# Finally, remove the Task and TaskReplicas entries.
res = self.removeTasks( taskIDs, connection )
return res
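# Hedged usage sketch (not part of the original code): SourceTaskIDs are the external
# (job) identifiers that originally requested the staging, e.g.:
#   res = db.killTasksBySourceTaskID( [ 1000001, 1000002 ] )
# Only replicas with Links=1 (referenced by no other task) have their staging records
# removed; the job IDs above are placeholders.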
def removeStageRequests( self, replicaIDs, connection = False ):
connection = self.__getConnection( connection )
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeStageRequests. Problem removing entries from StageRequests." )
return res
return res
def removeTasks( self, taskIDs, connection = False ):
""" This will delete the entries from the TaskReplicas for the provided taskIDs. """
connection = self.__getConnection( connection )
req = "DELETE FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from TaskReplicas." )
return res
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeTasks', reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete Tasks = %s" % ( self._caller(), 'removeTasks', record ) )
req = "DELETE FROM Tasks WHERE TaskID in (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from Tasks." )
gLogger.verbose( "%s.%s_DB: deleted Tasks" % ( self._caller(), 'removeTasks' ) )
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
return res
def setOldTasksAsFailed( self, daysOld, connection = False ):
"""
Set Tasks older than "daysOld" number of days to Failed
These tasks have already been retried every day for staging
"""
req = "UPDATE Tasks SET Status='Failed' WHERE DATE_ADD(SubmitTime, INTERVAL %s DAY ) < UTC_TIMESTAMP();" % ( daysOld )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.setOldTasksAsFailed. Problem setting old Tasks to Failed." )
return res
return res
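# Hedged usage sketch (illustrative only): usually run periodically by an agent, e.g.
#   res = db.setOldTasksAsFailed( 10 )
# fails every task submitted more than 10 days ago. Note that the UPDATE above has no
# Status filter, so it touches all old tasks regardless of their current state.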
def getCacheReplicasSummary( self, connection = False ):
"""
Reports breakdown of file number/size in different staging states across storage elements
"""
connection = self.__getConnection( connection )
req = "SELECT DISTINCT(Status),SE,COUNT(*),sum(size)/(1024*1024*1024) FROM CacheReplicas GROUP BY Status,SE;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.getCacheReplicasSummary failed." )
return res
resSummary = {}
i = 1
for status, se, numFiles, sumFiles in res['Value']:
resSummary[i] = {'Status':status, 'SE':se, 'NumFiles':long( numFiles ), 'SumFiles':float( sumFiles )}
i += 1
return S_OK( resSummary )
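# Hedged usage sketch (not part of the original code): the summary is keyed by a
# running integer, with sizes reported in GB, so a returned entry might look like
#   { 1: { 'Status': 'Staged', 'SE': 'CERN-RAW', 'NumFiles': 120, 'SumFiles': 3.5 } }
# The SE name and figures are placeholder values.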
def removeUnlinkedReplicas( self, connection = False ):
""" This will remove Replicas from the CacheReplicas that are not associated to any Task.
If the Replica has been Staged,
wait until StageRequest.PinExpiryTime and remove the StageRequest and CacheReplicas entries
"""
connection = self.__getConnection( connection )
# First, check if there is a StageRequest and PinExpiryTime has arrived
req = "select SR.ReplicaID from CacheReplicas CR,StageRequests SR WHERE CR.Links = 0 and CR.ReplicaID=SR.ReplicaID group by SR.ReplicaID HAVING max(SR.PinExpiryTime) < UTC_TIMESTAMP();"
# req = "SELECT ReplicaID from CacheReplicas WHERE Links = 0;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
return res
replicaIDs = [ row[0] for row in res['Value'] ]
# Look for Failed CacheReplicas which are not associated to any Task. These have no PinExpiryTime in StageRequests
# as they were not staged successfully (for various reasons), even though a staging request had been submitted
req = "SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND Status = 'Failed';"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0 AND Status=Failed." )
else:
replicaIDs.extend( [ row[0] for row in res['Value'] ] )
if replicaIDs:
# Remove the expired entries from the StageRequests table
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas',
reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete StageRequests = %s" % ( self._caller(), 'removeUnlinkedReplicas',
record ) )
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem deleting from StageRequests." )
return res
gLogger.verbose( "%s.%s_DB: deleted StageRequests" % ( self._caller(), 'removeUnlinkedReplicas' ) )
gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s StageRequests entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
# Second, look for CacheReplicas for which there is no entry in StageRequests
req = 'SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND ReplicaID NOT IN ( SELECT DISTINCT( ReplicaID ) FROM StageRequests )'
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
else:
replicaIDs.extend( [ row[0] for row in res['Value'] ] )
if not replicaIDs:
return S_OK()
# Now delete all CacheReplicas
reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas', reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete CacheReplicas = %s" % ( self._caller(), 'removeUnlinkedReplicas', record ) )
req = "DELETE FROM CacheReplicas WHERE ReplicaID IN (%s) AND Links= 0;" % intListToString( replicaIDs )
res = self._update( req, connection )
if res['OK']:
gLogger.verbose( "%s.%s_DB: deleted CacheReplicas" % ( self._caller(), 'removeUnlinkedReplicas' ) )
gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s CacheReplicas entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
else:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem removing entries from CacheReplicas." )
return res
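# Hedged usage sketch (illustrative only): typically called by a cleanup agent,
#   res = db.removeUnlinkedReplicas()
# which drops StageRequests whose pins have expired and CacheReplicas with Links=0
# that no task references any longer.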
|
hgiemza/DIRAC
|
StorageManagementSystem/DB/StorageManagementDB.py
|
Python
|
gpl-3.0
| 56,306
|
[
"DIRAC"
] |
b2fa1d4304a99c06bff27a96f7e30a940b86a4a9e4fc8edb28f2cc0fa3048aaa
|