| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#-*- coding: utf-8 -*-
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Configuration file')
arg_lists = []
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ('true', '1')
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--input_embed', type=int, default=128, help='actor input embedding')
net_arg.add_argument('--hidden_dim', type=int, default=128, help='actor LSTM num_neurons')
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--batch_size', type=int, default=256, help='batch size')
data_arg.add_argument('--input_dimension', type=int, default=2, help='city dimension')
data_arg.add_argument('--max_length', type=int, default=20, help='number of deliveries') # this excludes depot
data_arg.add_argument('--speed', type=float, default=10.0, help='agent speed') ############################### speed 10
data_arg.add_argument('--kNN', type=int, default=5, help='int for random k_nearest_neighbor') ################ kNN 5
data_arg.add_argument('--width_mean', type=float, default=30.0, help='tw width gaussian distribution mean') ### [5,2] n20w20, [11,5] n20w40, [17,7] n20w60
data_arg.add_argument('--width_std', type=float, default=11.0, help='tw width gaussian distribution std') ##### [22,9] n20w80, [30,11] n20w100
data_arg.add_argument('--dir_', type=str, default='n20w100', help='Dumas benchmark instances') ###############
# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--nb_epoch', type=int, default=220000, help='nb epoch')
train_arg.add_argument('--lr1_start', type=float, default=0.001, help='actor learning rate')
train_arg.add_argument('--lr1_decay_step', type=int, default=5000, help='lr1 decay step')
train_arg.add_argument('--lr1_decay_rate', type=float, default=0.96, help='lr1 decay rate')
train_arg.add_argument('--beta', type=int, default=10, help='weight for TW constraint') ###################### 3 during training / 10 for test
train_arg.add_argument('--temperature', type=float, default=3.0, help='pointer_net initial temperature') #####
train_arg.add_argument('--C', type=float, default=10.0, help='pointer_net tan clipping')
# Misc
misc_arg = add_argument_group('User options') #####################################################
misc_arg.add_argument('--pretrain', type=str2bool, default=False, help='faster datagen for infinite speed')
misc_arg.add_argument('--inference_mode', type=str2bool, default=True, help='switch to inference mode when model is trained')
misc_arg.add_argument('--restore_model', type=str2bool, default=True, help='whether or not model is retrieved')
misc_arg.add_argument('--save_to', type=str, default='speed10/s10_k5_n20w100', help='saver sub directory') #####################
misc_arg.add_argument('--restore_from', type=str, default='speed10/s10_k5_n20w100', help='loader sub directory') ###############
misc_arg.add_argument('--log_dir', type=str, default='summary/test', help='summary writer log directory')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
def print_config():
config, _ = get_config()
print('\n')
print('Data Config:')
print('* Batch size:',config.batch_size)
print('* Sequence length:',config.max_length)
print('* City coordinates:',config.input_dimension)
print('\n')
print('Network Config:')
print('* Restored model:',config.restore_model)
print('* Actor input embedding:',config.input_embed)
print('* Actor hidden_dim (num neurons):',config.hidden_dim)
print('* Actor tan clipping:',config.C)
print('\n')
if config.inference_mode==False:
print('Training Config:')
print('* Nb epoch:',config.nb_epoch)
print('* Temperature:',config.temperature)
print('* Actor learning rate (init,decay_step,decay_rate):',config.lr1_start,config.lr1_decay_step,config.lr1_decay_rate)
else:
print('Testing Config:')
print('* Summary writer log dir:',config.log_dir)
print('\n')
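# Editor's note: hedged usage sketch, not part of the original config.py. Running
# the module directly would echo the parsed settings defined above; any unparsed
# command-line arguments are returned alongside the config namespace.
if __name__ == '__main__':
    demo_config, demo_unparsed = get_config()
    print_config()
    print('* Unparsed args:', demo_unparsed)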
| MichelDeudon/neural-combinatorial-optimization-rl-tensorflow | Ptr_Net_TSPTW/config.py | Python | mit | 4,073 | ["Gaussian"] | 52eeb4b3449caa4cedb36953c6a4d14d8d7f837659cd96fd5b8d065a11b5912e |
"""Module of objects that resemble or contain a profile, i.e. a 1 or 2-D f(x) representation."""
import enum
import warnings
from typing import Union, Tuple, Sequence, List, Optional
import argue
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Circle as mpl_Circle
from scipy import ndimage, signal
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
from scipy.optimize import OptimizeWarning, minimize
from scipy.stats import linregress
from .geometry import Point, Circle
from .hill import Hill
from .typing import NumberLike
from .utilities import convert_to_enum
# for Hill fits of 2D device data the # of points can be small.
# This results in optimization warnings about the variance of the fit (the variance isn't of concern for us for that particular item)
warnings.simplefilter("ignore", OptimizeWarning)
def stretch(array: np.ndarray, min: int=0, max: int=1, fill_dtype: Optional[np.dtype]=None) -> np.ndarray:
"""'Stretch' the profile to the fit a new min and max value and interpolate in between.
From: http://www.labri.fr/perso/nrougier/teaching/numpy.100/ exercise #17
Parameters
----------
array: numpy.ndarray
The numpy array to stretch.
min : number
The new minimum of the values.
max : number
The new maximum value.
fill_dtype : numpy data type
If None (default), the array will be stretched to the passed min and max.
If a numpy data type (e.g. np.int16), the array will be stretched to fit the full range of values
of that data type. If a value is given for this parameter, it overrides ``min`` and ``max``.
"""
new_max = max
new_min = min
if fill_dtype is not None:
try:
di = np.iinfo(fill_dtype)
except ValueError:
di = np.finfo(fill_dtype)
new_max = di.max
new_min = di.min
# perfectly normalize the array (0..1). ground, then div by range
stretched_array = (array - array.min())/(array.max() - array.min())
# stretch normalized array to new max/min
stretched_array *= new_max
# stretched_array += new_min
if fill_dtype:
stretched_array = stretched_array.astype(fill_dtype)
return stretched_array
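# Editor's note: illustrative doctest-style example (not part of pylinac) of the
# ``fill_dtype`` behaviour, which rescales values to the full range of the given type.
# >>> stretch(np.array([2.0, 4.0, 6.0]), fill_dtype=np.uint8)
# array([  0, 127, 255], dtype=uint8)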
class ProfileMixin:
"""A mixin to provide various manipulations of 1D profile data."""
values: np.ndarray
def invert(self) -> None:
"""Invert (imcomplement) the profile."""
orig_array = self.values
self.values = -orig_array + orig_array.max() + orig_array.min()
def normalize(self, norm_val: Union[str, NumberLike]='max') -> None:
"""Normalize the profile to the given value.
Parameters
----------
norm_val : str, number
If a string, must be 'max', which normalizes the values to the maximum value.
If a number, normalizes all values to that number.
"""
if norm_val == 'max':
val = self.values.max()
else:
val = norm_val
self.values /= val
def stretch(self, min: NumberLike=0, max: NumberLike=1) -> None:
"""'Stretch' the profile to the min and max parameter values.
Parameters
----------
min : number
The new minimum of the values
max : number
The new maximum value.
"""
self.values = stretch(self.values, min=min, max=max)
def ground(self) -> float:
"""Ground the profile such that the lowest value is 0.
Returns
-------
float
The minimum value that was used as the grounding value.
"""
min_val = self.values.min()
self.values = self.values - min_val
return min_val
@argue.options(kind=('median', 'gaussian'))
def filter(self, size: NumberLike=0.05, kind: str='median') -> None:
"""Filter the profile.
Parameters
----------
size : float, int
Size of the median filter to apply.
If a float, the size is the ratio of the length. Must be in the range 0-1.
E.g. if size=0.1 for a 1000-element array, the filter will be 100 elements.
If an int, the filter is the size passed.
kind : {'median', 'gaussian'}
The kind of filter to apply. If gaussian, `size` is the sigma value.
"""
if isinstance(size, float):
if 0 < size < 1:
size = int(round(len(self.values)*size))
size = max(size, 1)
else:
raise TypeError("Float was passed but was not between 0 and 1")
if kind == 'median':
self.values = ndimage.median_filter(self.values, size=size)
elif kind == 'gaussian':
self.values = ndimage.gaussian_filter(self.values, sigma=size)
def __len__(self):
return len(self.values)
def __getitem__(self, items):
return self.values[items]
class Interpolation(enum.Enum):
"""Interpolation Enum"""
NONE = None #:
LINEAR = 'Linear' #:
SPLINE = 'Spline' #:
class Normalization(enum.Enum):
"""Normalization method Enum"""
NONE = None #:
GEOMETRIC_CENTER = 'Geometric center' #:
BEAM_CENTER = 'Beam center' #:
MAX = 'Max' #:
class Edge(enum.Enum):
"""Edge detection Enum"""
FWHM = 'FWHM' #:
INFLECTION_DERIVATIVE = 'Inflection Derivative' #:
INFLECTION_HILL = 'Inflection Hill' #:
class SingleProfile(ProfileMixin):
"""A profile that has one large signal, e.g. a radiation beam profile.
Signal analysis methods are given, mostly based on FWXM and on Hill function calculations.
Profiles with multiple peaks are better handled by the MultiProfile class.
"""
def __init__(self, values: np.ndarray, dpmm: float = None,
interpolation: Union[Interpolation, str, None] = Interpolation.LINEAR,
ground: bool = True,
interpolation_resolution_mm: float = 0.1,
interpolation_factor: float = 10,
normalization_method: Union[Normalization, str] = Normalization.BEAM_CENTER,
edge_detection_method: Union[Edge, str] = Edge.FWHM,
edge_smoothing_ratio: float = 0.003,
hill_window_ratio: float = 0.1):
"""
Parameters
----------
values
The profile numpy array. Must be 1D.
dpmm
The dots (pixels) per mm. Pass to get info like beam width in distance units in addition to pixels
interpolation
Interpolation technique.
ground
Whether to ground the profile (set min value to 0). Helpful most of the time.
interpolation_resolution_mm
The resolution that the interpolation will scale to. **Only used if dpmm is passed and interpolation is set**.
E.g. if the dpmm is 0.5 and the resolution is set to 0.1mm the data will be interpolated to have a new dpmm of 10 (1/0.1).
interpolation_factor
The factor by which to multiply the number of data points. **Only used if interpolation is used and dpmm is NOT passed**. E.g. 10
will resample the existing data at 10x the original resolution according to the interpolation method passed.
normalization_method
How to pick the point to normalize the data to.
edge_detection_method
The method by which to detect the field edge. FWHM is reasonable most of the time except for FFF beams.
Inflection-derivative will use the max gradient to determine the field edge. Note that this may not be the
50% height. In fact, for FFF beams it shouldn't be. Inflection methods are better for FFF and other unusual
beam shapes.
edge_smoothing_ratio
**Only applies to INFLECTION_DERIVATIVE and INFLECTION_HILL.**
The ratio of the length of the values to use as the sigma for a Gaussian filter applied before searching for
the inflection. E.g. 0.005 with a profile of 1000 points will result in a sigma of 5.
This helps make the inflection point detection more robust to noise. Increase for noisy data.
hill_window_ratio
The ratio of the field size to use as the window to fit the Hill function. E.g. 0.2 will use a window
centered about each edge with a width of 20% the size of the field width. **Only applies when the edge
detection is INFLECTION_HILL**.
"""
self._interp_method = convert_to_enum(interpolation, Interpolation)
self._interpolation_res = interpolation_resolution_mm
self._interpolation_factor = interpolation_factor
self._norm_method = convert_to_enum(normalization_method, Normalization)
self._edge_method = convert_to_enum(edge_detection_method, Edge)
self._edge_smoothing_ratio = edge_smoothing_ratio
self._hill_window_ratio = hill_window_ratio
self.values = values # set initial data so we can do things like find beam center
self.dpmm = dpmm
fitted_values, new_dpmm, x_indices = self._interpolate(values, dpmm, interpolation_resolution_mm,
interpolation_factor, self._interp_method)
self.dpmm = new_dpmm # update as needed
self.values = fitted_values
self.x_indices = x_indices
if ground:
fitted_values -= fitted_values.min()
norm_values = self._normalize(fitted_values, self._norm_method)
self.values = norm_values # update values
@staticmethod
def _interpolate(values, dpmm, interpolation_resolution, interpolation_factor, interp_method: Interpolation) -> Tuple[
np.ndarray, Optional[float], np.ndarray]:
"""Fit the data to the passed interpolation method. Will also calculate the new values to correct the measurements such as dpmm"""
x_indices = list(range(len(values)))
if interp_method == Interpolation.NONE:
return values, dpmm, x_indices # do nothing
elif interp_method == Interpolation.LINEAR:
if dpmm is not None:
samples = int(round((len(x_indices)-1)/(dpmm*interpolation_resolution)))
new_dpmm = 1/interpolation_resolution
else:
samples = int(round((len(x_indices)-1)*interpolation_factor))
new_dpmm = None
f = interp1d(x_indices, values, kind='linear', bounds_error=True)
new_x = np.linspace(0, len(x_indices)-1, num=samples)
return f(new_x), new_dpmm, new_x
elif interp_method == Interpolation.SPLINE:
if dpmm is not None:
samples = int(round((len(x_indices)-1)/(dpmm*interpolation_resolution)))
new_dpmm = 1 / interpolation_resolution
else:
samples = int(round((len(x_indices)-1)*interpolation_factor))
new_dpmm = None
f = interp1d(x_indices, values, kind='cubic')
new_x = np.linspace(0, len(x_indices)-1, num=samples)
return f(new_x), new_dpmm, new_x
def _normalize(self, values, method: Normalization) -> np.ndarray:
"""Normalize the data given a method."""
if method == Normalization.NONE:
return values
elif method == Normalization.MAX:
return values / values.max()
elif method == Normalization.GEOMETRIC_CENTER:
return values / self._geometric_center(values)['value (exact)']
elif method == Normalization.BEAM_CENTER:
return values / self.beam_center()['value (@rounded)']
def _geometric_center(self, values) -> dict:
"""Returns the center index and value of the profile.
If the profile has an even number of values, the centre lies between the two centre indices and the centre
value is the average of the two centre values; otherwise, the exact centre index and value are returned."""
plen = values.shape[0]
# buffer overflow can cause the below addition to give strange results
values = values.astype(np.float64)
if plen % 2 == 0: # plen is even and central detectors straddle CAX
cax = (values[int(plen / 2)] + values[int(plen / 2) - 1]) / 2.0
else: # plen is odd and we have a central detector
cax = values[int((plen - 1) / 2)]
plen = (plen - 1)/2.0
return {'index (exact)': plen, 'value (exact)': cax}
def geometric_center(self) -> dict:
"""The geometric center (i.e. the device center)"""
return self._geometric_center(self.values)
def beam_center(self) -> dict:
"""The center of the detected beam. This can account for asymmetries in the beam position (e.g. offset jaws)"""
if self._edge_method == Edge.FWHM:
data = self.fwxm_data(x=50)
return {'index (rounded)': data['center index (rounded)'],
'index (exact)': data['center index (exact)'],
'value (@rounded)': data['center value (@rounded)']}
elif self._edge_method in (Edge.INFLECTION_DERIVATIVE, Edge.INFLECTION_HILL):
infl = self.inflection_data()
mid_point = infl['left index (exact)'] + (infl['right index (exact)'] - infl['left index (exact)']) / 2
return {'index (rounded)': int(round(mid_point)),
'index (exact)': mid_point,
'value (@rounded)': self.values[int(round(mid_point))]}
@argue.bounds(x=(0, 100))
def fwxm_data(self, x: int = 50) -> dict:
"""Return the width at X-Max, where X is the percentage height.
Parameters
----------
x
The percent height of the profile. E.g. x = 50 is 50% height,
i.e. FWHM.
"""
_, peak_props = find_peaks(self.values, fwxm_height=x/100, max_number=1)
left_idx = peak_props['left_ips'][0]
right_idx = peak_props['right_ips'][0]
fwxm_center_idx = ((peak_props['right_ips'][0] - peak_props['left_ips'][0]) / 2 + peak_props['left_ips'][0])
data = {'width (exact)': peak_props['widths'][0],
'width (rounded)': int(round(right_idx)) - int(round(left_idx)),
'center index (rounded)': int(round(fwxm_center_idx)),
'center index (exact)': fwxm_center_idx,
'center value (@rounded)': self.values[int(round(fwxm_center_idx))],
'left index (exact)': left_idx,
'left index (rounded)': int(round(left_idx)),
'left value (@rounded)': self.values[int(round(left_idx))],
'right index (exact)': right_idx,
'right index (rounded)': int(round(right_idx)),
'right value (@rounded)': self.values[int(round(right_idx))],
'field values': self.values[int(round(left_idx)):
int(round(right_idx))],
'peak_props': peak_props}
if self.dpmm:
data['width (exact) mm'] = data['width (exact)'] / self.dpmm
data['left distance (exact) mm'] = abs(data['center index (exact)'] - data['left index (exact)']) / self.dpmm
data['right distance (exact) mm'] = abs(data['right index (exact)'] - data['center index (exact)']) / self.dpmm
return data
@argue.bounds(in_field_ratio=(0, 1.0), slope_exclusion_ratio=(0, 1.0))
def field_data(self, in_field_ratio: float = 0.8, slope_exclusion_ratio=0.2) -> dict:
"""Return the width at X-Max, where X is the percentage height.
Parameters
----------
in_field_ratio
In Field Ratio: 1.0 is the entire detected field; 0.8 would be the central 80%, etc.
slope_exclusion_ratio
Ratio of the field width to use as the cutoff between "top" calculation and "slope" calculation. Useful for FFF beams.
This area is centrally located in the field. E.g. 0.2 will use the central 20% of the field to calculate
the "top" value. To calculate the slope of each side, the field width between the edges of the in_field_ratio
and the slope exclusion region are used.
.. warning:: The "top" value is always calculated. For FFF beams this should be reasonable, but for flat beams
this value may not be meaningful.
"""
if slope_exclusion_ratio >= in_field_ratio:
raise ValueError("The exclusion region must be smaller than the field ratio")
if self._edge_method == Edge.FWHM:
data = self.fwxm_data(x=50)
beam_center_idx = data['center index (exact)']
full_width = data['width (exact)']
elif self._edge_method in (Edge.INFLECTION_DERIVATIVE, Edge.INFLECTION_HILL):
infl_data = self.inflection_data()
beam_center_idx = self.beam_center()['index (exact)']
full_width = infl_data['right index (exact)'] - infl_data['left index (exact)']
beam_center_idx_r = int(round(beam_center_idx))
cax_idx = self.geometric_center()['index (exact)']
cax_idx_r = int(round(cax_idx))
field_left_idx = beam_center_idx - in_field_ratio * full_width / 2
field_left_idx_r = int(round(field_left_idx))
field_right_idx = beam_center_idx + in_field_ratio * full_width / 2
field_right_idx_r = int(round(field_right_idx))
field_width = field_right_idx - field_left_idx
# slope calcs
inner_left_idx = beam_center_idx - slope_exclusion_ratio*field_width/2
inner_left_idx_r = int(round(inner_left_idx))
inner_right_idx = beam_center_idx + slope_exclusion_ratio*field_width/2
inner_right_idx_r = int(round(inner_right_idx))
left_fit = linregress(range(field_left_idx_r, inner_left_idx_r),
self.values[field_left_idx_r:inner_left_idx_r])
right_fit = linregress(range(inner_right_idx_r, field_right_idx_r),
self.values[inner_right_idx_r:field_right_idx_r])
# top calc
fit_params = np.polyfit(range(inner_left_idx_r, inner_right_idx_r),
self.values[inner_left_idx_r:inner_right_idx_r], deg=2)
width = abs(inner_right_idx_r - inner_left_idx_r)
def poly_func(x):
# return the negative since we're MINIMIZING and want the top value
return -(fit_params[0] * (x ** 2) + fit_params[1] * x + fit_params[2])
# minimize the polynomial function
min_f = minimize(poly_func, x0=(inner_left_idx_r+width/2,), bounds=((inner_left_idx_r, inner_right_idx_r),))
top_idx = min_f.x[0]
top_val = -min_f.fun
data = {'width (exact)': field_width,
'beam center index (exact)': beam_center_idx,
'beam center index (rounded)': beam_center_idx_r,
'beam center value (@rounded)': self.values[int(round(beam_center_idx))],
'cax index (exact)': cax_idx,
'cax index (rounded)': cax_idx_r,
'cax value (@rounded)': self.values[int(round(cax_idx))],
'left index (exact)': field_left_idx,
'left index (rounded)': field_left_idx_r,
'left value (@rounded)': self.values[int(round(field_left_idx))],
'left slope': left_fit.slope,
'left intercept': left_fit.intercept,
'right slope': right_fit.slope,
'right intercept': right_fit.intercept,
'left inner index (exact)': inner_left_idx,
'left inner index (rounded)': inner_left_idx_r,
'right inner index (exact)': inner_right_idx,
'right inner index (rounded)': inner_right_idx_r,
'"top" index (exact)': top_idx,
'"top" index (rounded)': int(round(top_idx)),
'"top" value (@exact)': top_val,
'top params': fit_params,
'right index (exact)': field_right_idx,
'right index (rounded)': field_right_idx_r,
'right value (@rounded)': self.values[int(round(field_right_idx))],
'field values': self.values[int(round(field_left_idx)):
int(round(field_right_idx))]}
if self.dpmm:
data['width (exact) mm'] = data['width (exact)'] / self.dpmm
data['left slope (%/mm)'] = data['left slope'] * self.dpmm * 100
data['right slope (%/mm)'] = data['right slope'] * self.dpmm * 100
data['left distance->beam center (exact) mm'] = abs(data['beam center index (exact)'] - data['left index (exact)']) / self.dpmm
data['right distance->beam center (exact) mm'] = abs(data['right index (exact)'] - data['beam center index (exact)']) / self.dpmm
data['left distance->CAX (exact) mm'] = abs(data['cax index (exact)'] - data['left index (exact)']) / self.dpmm
data['right distance->CAX (exact) mm'] = abs(data['cax index (exact)'] - data['right index (exact)']) / self.dpmm
data['left distance->top (exact) mm'] = abs(data['"top" index (exact)'] - data['left index (exact)']) / self.dpmm
data['right distance->top (exact) mm'] = abs(data['"top" index (exact)'] - data['right index (exact)']) / self.dpmm
data['"top"->beam center (exact) mm'] = (data['"top" index (exact)'] - data['beam center index (exact)']) / self.dpmm
data['"top"->CAX (exact) mm'] = abs(data['"top" index (exact)'] - data['cax index (exact)']) / self.dpmm
return data
def inflection_data(self) -> dict:
"""Calculate the profile inflection values using either the 2nd derivative or a fitted Hill function.
.. note::
This only applies if the edge detection method is `INFLECTION_...`.
"""
# get max/min of the gradient, which is basically the same as the 2nd deriv 0-crossing
if self._edge_method == Edge.FWHM:
raise ValueError("FWHM edge method does not have inflection points. Use a different edge detection method")
d1 = np.gradient(gaussian_filter1d(self.values, sigma=self._edge_smoothing_ratio * len(self.values)))
(peak_idxs, _) = MultiProfile(d1).find_peaks(threshold=0.8)
(valley_idxs, _) = MultiProfile(d1).find_valleys(threshold=0.8)
left_idx = peak_idxs[0] # left-most index
right_idx = valley_idxs[-1] # right-most index
if self._edge_method == Edge.INFLECTION_DERIVATIVE:
data = {'left index (rounded)': left_idx,
'left index (exact)': left_idx,
'right index (rounded)': right_idx,
'right index (exact)': right_idx,
'left value (@rounded)': self.values[int(round(left_idx))],
'left value (@exact)': self.values[int(round(left_idx))],
'right value (@rounded)': self.values[int(round(right_idx))],
'right value (@exact)': self.values[int(round(right_idx))]
}
return data
else: # Hill
# the 2nd deriv is a good approximation for the inflection point. Start there and fit Hill about it
# penum_half_window = self.field_data()['width (exact)'] * self._hill_window_ratio / 2
penum_half_window = int(round(self._hill_window_ratio * abs(right_idx - left_idx) / 2))
# left side
x_data = np.array([x for x in np.arange(left_idx - penum_half_window, left_idx + penum_half_window) if x >=0])
y_data = self.values[x_data]
# y_data = self.values[left_idx - penum_half_window: left_idx + penum_half_window]
left_hill = Hill.fit(x_data, y_data)
left_infl = left_hill.inflection_idx()
# right side
x_data = np.array([x for x in np.arange(right_idx - penum_half_window, right_idx + penum_half_window) if x < len(d1)])
y_data = self.values[x_data]
right_hill = Hill.fit(x_data, y_data)
right_infl = right_hill.inflection_idx()
data = {'left index (rounded)': left_infl['index (rounded)'],
'left index (exact)': left_infl['index (exact)'],
'right index (rounded)': right_infl['index (rounded)'],
'right index (exact)': right_infl['index (exact)'],
'left value (@exact)': left_hill.y(left_infl['index (exact)']),
'right value (@exact)': right_hill.y(right_infl['index (exact)']),
'left Hill params': left_hill.params, 'right Hill params': right_hill.params}
return data
def penumbra(self, lower: int = 20, upper: int = 80):
"""Calculate the penumbra of the field. Dependent on the edge detection method.
Parameters
----------
lower
The lower % of the beam to use. If the edge method is FWHM, this is the typical % penumbra you're thinking of.
If an inflection method is used, it will be ``lower/50`` of the inflection point value. E.g. if the inflection
point is perfectly at 50% with a ``lower`` of 20, then the penumbra value here will be 20% of the maximum.
If the inflection point is at 30% of the max value (say for a FFF beam) then the lower penumbra will be ``lower/50``
of the inflection point or ``0.3*lower/50``.
upper
Upper % of the beam to use. See lower for details.
"""
if lower > upper:
raise ValueError("Upper penumbra value must be larger than the lower penumbra value")
if self._edge_method == Edge.FWHM:
upper_data = self.fwxm_data(x=upper)
lower_data = self.fwxm_data(x=lower)
data = {f'left {lower}% index (exact)': lower_data['left index (exact)'],
f'left {lower}% value (@rounded)': lower_data['left value (@rounded)'],
f'left {upper}% index (exact)': upper_data['left index (exact)'],
f'left {upper}% value (@rounded)': upper_data['left value (@rounded)'],
f'right {lower}% index (exact)': lower_data['right index (exact)'],
f'right {lower}% value (@rounded)': lower_data['right value (@rounded)'],
f'right {upper}% index (exact)': upper_data['right index (exact)'],
f'right {upper}% value (@rounded)': upper_data['right value (@rounded)'],
'left values': self.values[lower_data['left index (rounded)']:upper_data['left index (rounded)']],
'right values': self.values[upper_data['right index (rounded)']:lower_data['right index (rounded)']],
f'left penumbra width (exact)': abs(upper_data['left index (exact)'] - lower_data['left index (exact)']),
f'right penumbra width (exact)': abs(upper_data['right index (exact)'] - lower_data['right index (exact)']),
}
if self.dpmm:
data['left penumbra width (exact) mm'] = data['left penumbra width (exact)'] / self.dpmm
data['right penumbra width (exact) mm'] = data['right penumbra width (exact)'] / self.dpmm
return data
elif self._edge_method == Edge.INFLECTION_DERIVATIVE:
infl_data = self.inflection_data()
lower_left_value = infl_data['left value (@rounded)']*lower/50*100
upper_left_value = infl_data['left value (@rounded)']*upper/50*100
upper_left_data = self.fwxm_data(x=upper_left_value)
lower_left_data = self.fwxm_data(x=lower_left_value)
lower_right_value = infl_data['right value (@exact)']*lower/50*100
upper_right_value = infl_data['right value (@exact)']*upper/50*100
upper_right_data = self.fwxm_data(x=upper_right_value)
lower_right_data = self.fwxm_data(x=lower_right_value)
data = {f'left {lower}% index (exact)': lower_left_data['left index (exact)'],
f'left {upper}% index (exact)': upper_left_data['left index (exact)'],
f'right {lower}% index (exact)': lower_right_data['right index (exact)'],
f'right {upper}% index (exact)': upper_right_data['right index (exact)'],
'left values': self.values[lower_left_data['left index (rounded)']:upper_left_data['left index (rounded)']],
'right values': self.values[upper_right_data['right index (rounded)']:lower_right_data['right index (rounded)']],
f'left penumbra width (exact)': abs(upper_left_data['left index (exact)'] - lower_left_data['left index (exact)']),
f'right penumbra width (exact)': abs(upper_right_data['right index (exact)'] - lower_right_data['right index (exact)']),
}
if self.dpmm:
data['left penumbra width (exact) mm'] = data['left penumbra width (exact)'] / self.dpmm
data['right penumbra width (exact) mm'] = data['right penumbra width (exact)'] / self.dpmm
return data
elif self._edge_method == Edge.INFLECTION_HILL:
infl_data = self.inflection_data()
left_hill = Hill.from_params(infl_data['left Hill params'])
right_hill = Hill.from_params(infl_data['right Hill params'])
lower_left_value = infl_data['left value (@exact)']*lower/50
lower_left_index = left_hill.x(lower_left_value)
upper_left_value = infl_data['left value (@exact)']*upper/50
upper_left_index = left_hill.x(upper_left_value)
lower_right_value = infl_data['right value (@exact)']*lower/50
lower_right_index = right_hill.x(lower_right_value)
upper_right_value = infl_data['right value (@exact)']*upper/50
upper_right_index = right_hill.x(upper_right_value)
data = {f'left {lower}% index (exact)': lower_left_index,
f'left {lower}% value (exact)': lower_left_value,
f'left {upper}% index (exact)': upper_left_index,
f'left {upper}% value (exact)': upper_left_value,
f'right {lower}% index (exact)': lower_right_index,
f'right {lower}% value (exact)': lower_right_value,
f'right {upper}% index (exact)': upper_right_index,
f'right {upper}% value (exact)': upper_right_value,
'left values': self.values[int(round(lower_left_index)):int(round(upper_left_index))],
'right values': self.values[int(round(upper_right_index)):int(round(lower_right_index))],
f'left penumbra width (exact)': abs(upper_left_index - lower_left_index),
f'right penumbra width (exact)': abs(upper_right_index - lower_right_index),
f'left gradient (exact)': left_hill.gradient_at(infl_data['left index (exact)']),
r'right gradient (exact)': right_hill.gradient_at(infl_data['right index (exact)']),
}
if self.dpmm:
data['left penumbra width (exact) mm'] = data['left penumbra width (exact)'] / self.dpmm
data['left gradient (exact) %/mm'] = data['left gradient (exact)'] * self.dpmm * 100 # 100 to convert to %
data['right penumbra width (exact) mm'] = data['right penumbra width (exact)'] / self.dpmm
data['right gradient (exact) %/mm'] = data['right gradient (exact)'] * self.dpmm * 100
return data
@argue.options(calculation=('mean', 'median', 'max', 'min', 'area'))
def field_calculation(self, in_field_ratio: float=0.8, calculation: str='mean') -> Union[float, Tuple[float, float]]:
"""Perform an operation on the field values of the profile.
This function is useful for determining field symmetry and flatness.
Parameters
----------
in_field_ratio
Ratio of the field width to use in the calculation.
calculation : {'mean', 'median', 'max', 'min', 'area'}
Calculation to perform on the field values.
"""
field_values = self.field_data(in_field_ratio)
if calculation == 'mean':
return field_values['field values'].mean()
elif calculation == 'median':
return float(np.median(field_values['field values']))
elif calculation == 'max':
return field_values['field values'].max()
elif calculation == 'min':
return field_values['field values'].min()
def plot(self) -> None:
"""Plot the profile."""
plt.plot(self.values)
plt.show()
class MultiProfile(ProfileMixin):
"""A class for analyzing 1-D profiles that contain multiple signals. Methods are mostly for *finding & filtering*
the signals, peaks, valleys, etc. Profiles with a single peak (e.g. radiation beam profiles) are better handled by the SingleProfile class.
Attributes
----------
values : ndarray
The array of values passed in on instantiation.
peaks : list
List of Points, containing value and index information.
valleys : list
Same as peaks, but for valleys.
"""
values: Union[np.ndarray, Sequence]
peaks: List
valleys: List
def __init__(self, values: Union[np.ndarray, Sequence]):
"""
Parameters
----------
values : iterable
Array of profile values.
"""
self.values = values
self.peaks = []
self.valleys = []
def plot(self, ax: Optional[plt.Axes]=None) -> None:
"""Plot the profile.
Parameters
----------
ax: plt.Axes
An axis to plot onto. Optional.
"""
if ax is None:
fig, ax = plt.subplots()
ax.plot(self.values)
peaks_x = [peak.idx for peak in self.peaks]
peaks_y = [peak.value for peak in self.peaks]
ax.plot(peaks_x, peaks_y, "gv")
valley_x = [peak.idx for peak in self.valleys]
valley_y = [peak.value for peak in self.valleys]
ax.plot(valley_x, valley_y, "r^")
def find_peaks(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05, max_number: int=None,
search_region: Tuple=(0.0, 1.0), peak_sort='prominences') -> Tuple[np.ndarray, np.ndarray]:
"""Find the peaks of the profile using a simple maximum value search. This also sets the `peaks` attribute.
Parameters
----------
threshold : int, float
The value the peak must be above to be considered a peak. This removes "peaks"
that are in a low-value region.
If passed an int, the actual value is the threshold.
E.g. when passed 15, any peak with a value <15 is removed.
If passed a float, it will threshold as a percent. Must be between 0 and 1.
E.g. when passed 0.4, any peak <40% of the maximum value will be removed.
min_distance : int, float
If passed an int, parameter is the number of elements apart a peak must be from neighboring peaks.
If passed a float, must be between 0 and 1 and represents the ratio of the profile to exclude.
E.g. if passed 0.05 with a 1000-element profile, the minimum peak separation will be 0.05*1000 = 50 elements.
max_number : int, None
Specify up to how many peaks will be returned. E.g. if 3 is passed in and 5 peaks are found, only the 3 largest
peaks will be returned. If None, no limit will be applied.
search_region : tuple of ints, floats, or both
The region within the profile to search. The tuple specifies the (left, right) edges to search.
This allows exclusion of edges from the search. If a value is an int, it is taken as is. If a float, must
be between 0 and 1 and is the ratio of the profile length. The left value must be less than the right.
Returns
-------
indices: ndarray, values, ndarray
The indices and values of the peaks.
"""
peak_idxs, peak_props = find_peaks(self.values, threshold=threshold, peak_separation=min_distance, max_number=max_number,
search_region=search_region, peak_sort=peak_sort)
self.peaks = [Point(value=peak_val, idx=peak_idx) for peak_idx, peak_val in zip(peak_idxs, peak_props['peak_heights'])]
return peak_idxs, peak_props['peak_heights']
def find_valleys(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05,
max_number: int=None, search_region: Tuple=(0.0, 1.0)) -> Tuple[np.ndarray, np.ndarray]:
"""Find the valleys (minimums) of the profile using a simple minimum value search.
Returns
-------
indices: ndarray, values, ndarray
The indices and values of the valleys.
See Also
--------
:meth:`~pylinac.core.profile.MultiProfile.find_peaks` : Further parameter info.
"""
valley_idxs, valley_props = find_peaks(-self.values, threshold=threshold, peak_separation=min_distance, max_number=max_number,
search_region=search_region)
self.valleys = [Point(value=self.values[valley_idx], idx=valley_idx) for valley_idx, valley_val in zip(valley_idxs, -valley_props['peak_heights'])]
return valley_idxs, self.values[valley_idxs]
def find_fwxm_peaks(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05,
max_number: int = None, search_region: Tuple=(0.0, 1.0), peak_sort: str = 'prominences', required_prominence=None) -> Tuple[np.ndarray, np.ndarray]:
"""Find peaks using the center of the FWXM (rather than by max value).
See Also
--------
find_peaks : Further parameter info
"""
_, peak_props = find_peaks(self.values, threshold=threshold, min_width=min_distance, max_number=max_number,
search_region=search_region, peak_sort=peak_sort, required_prominence=required_prominence)
fwxm_peak_idxs = []
for lt, rt in zip(peak_props['left_ips'], peak_props['right_ips']):
fwxm = int(round(lt + (rt - lt)/2))
fwxm_peak_idxs.append(fwxm)
fwxm_peak_vals = [self.values[fwxm] for fwxm in fwxm_peak_idxs]
self.peaks = [Point(value=peak_val, idx=peak_idx) for peak_idx, peak_val in zip(fwxm_peak_idxs, fwxm_peak_vals)]
return np.array(fwxm_peak_idxs), np.array(fwxm_peak_vals)
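# Editor's note: hedged doctest-style sketch (not from the original source) of
# MultiProfile peak finding on a synthetic multi-peak signal.
# >>> xs = np.linspace(0, 4 * np.pi, 400)
# >>> mp = MultiProfile(np.abs(np.sin(xs)))
# >>> idxs, vals = mp.find_peaks(threshold=0.5, min_distance=0.02)
# >>> len(idxs)  # one peak per hump of |sin| over 0..4*pi
# 4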
class CircleProfile(MultiProfile, Circle):
"""A profile in the shape of a circle.
Attributes
----------
image_array : ndarray
The 2D image array.
start_angle : int, float
Starting position of the profile in radians; 0 is right (0 on unit circle).
ccw : bool
How the profile is/was taken; clockwise or counter-clockwise.
"""
image_array: np.ndarray
start_angle: Union[float, int]
ccw: bool
sampling_ratio: float
_x_locations: Optional[np.ndarray]
_y_locations: Optional[np.ndarray]
def __init__(self, center: Point, radius: NumberLike, image_array: np.ndarray,
start_angle: Union[float, int]=0, ccw: bool=True, sampling_ratio: float=1.0):
"""
Parameters
----------
image_array : ndarray
The 2D image array.
start_angle : int, float
Starting position of the profile in radians; 0 is right (0 on unit circle).
ccw : bool
If True (default), the profile will proceed counter-clockwise (the direction on the unit circle).
If False, will proceed clockwise.
sampling_ratio : float
The ratio of pixel sampling to real pixels. E.g. if 1.0, the profile will have approximately
the same number of elements as was encountered in the profile. A value of 2.0 will sample
the profile at 2x the number of elements.
See Also
--------
:class:`~pylinac.core.geometry.Circle` : Further parameter info.
"""
Circle.__init__(self, center, radius)
self._ensure_array_size(image_array, self.radius + self.center.x, self.radius + self.center.y)
self.image_array = image_array
self.start_angle = start_angle
self.ccw = ccw
self.sampling_ratio = sampling_ratio
self._x_locations = None
self._y_locations = None
MultiProfile.__init__(self, self._profile)
@property
def size(self) -> float:
"""The elemental size of the profile."""
return np.pi * self.radius * 2 * self.sampling_ratio
@property
def _radians(self) -> np.ndarray:
interval = (2 * np.pi) / self.size
rads = np.arange(0 + self.start_angle, (2 * np.pi) + self.start_angle - interval, interval)
if self.ccw:
rads = rads[::-1]
return rads
@property
def x_locations(self) -> np.ndarray:
"""The x-locations of the profile values."""
if self._x_locations is None:
return np.cos(self._radians) * self.radius + self.center.x
else:
return self._x_locations
@x_locations.setter
def x_locations(self, array: np.ndarray):
self._x_locations = array
@property
def y_locations(self) -> np.ndarray:
"""The x-locations of the profile values."""
if self._y_locations is None:
return np.sin(self._radians) * self.radius + self.center.y
else:
return self._y_locations
@y_locations.setter
def y_locations(self, array: np.ndarray):
self._y_locations = array
@property
def _profile(self) -> np.ndarray:
"""The actual profile array; private attr that is passed to MultiProfile."""
return ndimage.map_coordinates(self.image_array, [self.y_locations, self.x_locations], order=0)
def find_peaks(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05,
max_number: int=None, search_region: Tuple[float, float]=(0.0, 1.0)) -> Tuple[np.ndarray, np.ndarray]:
"""Overloads Profile to also map peak locations to the image."""
peak_idxs, peak_vals = super().find_peaks(threshold, min_distance, max_number, search_region)
self._map_peaks()
return peak_idxs, peak_vals
def find_valleys(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05,
max_number: int=None, search_region: Tuple[float, float]=(0.0, 1.0)) -> Tuple[np.ndarray, np.ndarray]:
"""Overload Profile to also map valley locations to the image."""
valley_idxs, valley_vals = super().find_valleys(threshold, min_distance, max_number, search_region)
self._map_peaks()
return valley_idxs, valley_vals
def find_fwxm_peaks(self, threshold: Union[float, int]=0.3, min_distance: Union[float, int]=0.05,
max_number: int=None, search_region: Tuple[float, float]=(0.0, 1.0)) -> Tuple[np.ndarray, np.ndarray]:
"""Overloads Profile to also map the peak locations to the image."""
peak_idxs, peak_vals = super().find_fwxm_peaks(threshold, min_distance, max_number,
search_region=search_region)
self._map_peaks()
return peak_idxs, peak_vals
def _map_peaks(self) -> None:
"""Map found peaks to the x,y locations on the image/array; i.e. adds x,y coordinates to the peak locations"""
for peak in self.peaks:
peak.x = self.x_locations[int(peak.idx)]
peak.y = self.y_locations[int(peak.idx)]
def roll(self, amount: int) -> None:
"""Roll the profile and x and y coordinates."""
self.values = np.roll(self.values, -amount)
self.x_locations = np.roll(self.x_locations, -amount)
self.y_locations = np.roll(self.y_locations, -amount)
def plot2axes(self, axes: plt.Axes=None, edgecolor: str='black', fill: bool=False, plot_peaks: bool=True) -> None:
"""Plot the circle to an axes.
Parameters
----------
axes : matplotlib.Axes, None
The axes to plot on. If None, will create a new figure of the image array.
edgecolor : str
Color of the Circle; must be a valid matplotlib color.
fill : bool
Whether to fill the circle. matplotlib keyword.
plot_peaks : bool
If True, plots the found peaks as well.
"""
if axes is None:
fig, axes = plt.subplots()
axes.imshow(self.image_array)
axes.add_patch(
mpl_Circle((self.center.x, self.center.y), edgecolor=edgecolor, radius=self.radius, fill=fill))
if plot_peaks:
x_locs = [peak.x for peak in self.peaks]
y_locs = [peak.y for peak in self.peaks]
axes.autoscale(enable=False)
axes.scatter(x_locs, y_locs, s=40, marker='x', c=edgecolor)
@staticmethod
def _ensure_array_size(array: np.ndarray, min_width: float, min_height: float) -> None:
"""Ensure the array size of inputs are greater than the minimums."""
height = array.shape[0]
width = array.shape[1]
if width < min_width or height < min_height:
raise ValueError("Array size not large enough to compute profile")
class CollapsedCircleProfile(CircleProfile):
"""A circular profile that samples a thick band around the nominal circle, rather than just a 1-pixel-wide profile
to give a mean value.
"""
width_ratio: float
num_profiles: int
@argue.bounds(width_ratio=(0, 1))
def __init__(self, center: Point, radius: NumberLike, image_array: Union[np.ndarray, 'ArrayImage'], start_angle: int=0,
ccw: bool=True, sampling_ratio: float=1.0, width_ratio: float=0.1, num_profiles: int=20):
"""
Parameters
----------
width_ratio : float
The "thickness" of the band to sample. The ratio is relative to the radius. E.g. if the radius is 20
and the width_ratio is 0.2, the "thickness" will be 4 pixels.
num_profiles : int
The number of profiles to sample in the band. Profiles are distributed evenly within the band.
See Also
--------
:class:`~pylinac.core.profile.CircleProfile` : Further parameter info.
"""
self.width_ratio = width_ratio
self.num_profiles = num_profiles
super().__init__(center, radius, image_array, start_angle, ccw, sampling_ratio)
@property
def _radii(self) -> np.ndarray:
return np.linspace(start=self.radius * (1 - self.width_ratio), stop=self.radius * (1 + self.width_ratio),
num=self.num_profiles)
@property
def size(self) -> float:
return np.pi * max(self._radii) * 2 * self.sampling_ratio
@property
def _multi_x_locations(self) -> List:
"""List of x-locations of the sampling profiles"""
x = []
cos = np.cos(self._radians)
# extract profile for each circle radius
for radius in self._radii:
x.append(cos * radius + self.center.x)
return x
@property
def _multi_y_locations(self) -> List:
"""List of x-locations of the sampling profiles"""
y = []
sin = np.sin(self._radians)
# extract profile for each circle radius
for radius in self._radii:
y.append(sin * radius + self.center.y)
return y
@property
def _profile(self) -> np.ndarray:
"""The actual profile array; private attr that is passed to MultiProfile."""
profile = np.zeros(len(self._multi_x_locations[0]))
for radius, x, y in zip(self._radii, self._multi_x_locations, self._multi_y_locations):
profile += ndimage.map_coordinates(self.image_array, [y, x], order=0)
profile /= self.num_profiles
return profile
def plot2axes(self, axes: plt.Axes=None, edgecolor: str='black', fill: bool=False, plot_peaks: bool=True) -> None:
"""Add 2 circles to the axes: one at the maximum and minimum radius of the ROI.
See Also
--------
:meth:`~pylinac.core.profile.CircleProfile.plot2axes` : Further parameter info.
"""
if axes is None:
fig, axes = plt.subplots()
axes.imshow(self.image_array)
axes.add_patch(mpl_Circle((self.center.x, self.center.y), edgecolor=edgecolor, radius=self.radius*(1+self.width_ratio),
fill=fill))
axes.add_patch(mpl_Circle((self.center.x, self.center.y), edgecolor=edgecolor, radius=self.radius*(1-self.width_ratio),
fill=fill))
if plot_peaks:
x_locs = [peak.x for peak in self.peaks]
y_locs = [peak.y for peak in self.peaks]
axes.autoscale(enable=False)
axes.scatter(x_locs, y_locs, s=20, marker='x', c=edgecolor)
def find_peaks(values: np.ndarray, threshold: Union[float, int] = -np.inf, peak_separation: Union[float, int] = 0,
max_number: int = None, fwxm_height: float = 0.5, min_width: int = 0,
search_region: Tuple[float, float] = (0.0, 1.0), peak_sort='prominences', required_prominence=None) \
-> Tuple[np.ndarray, dict]:
"""Find the peaks of a 1D signal. Heavily relies on the scipy implementation.
Parameters
----------
values : array-like
Signal values to search for peaks within.
threshold : int, float
The value the peak must be above to be considered a peak. This removes "peaks"
that are in a low-value region.
If passed an int, the actual value is the threshold.
E.g. when passed 15, any peak with a value <15 is removed.
If passed a float, it will threshold as a percent. Must be between 0 and 1.
E.g. when passed 0.4, any peak <40% of the maximum value will be removed.
peak_separation : int, float
If passed an int, parameter is the number of elements apart a peak must be from neighboring peaks.
If passed a float, must be between 0 and 1 and represents the ratio of the profile to exclude.
E.g. if passed 0.05 with a 1000-element profile, the minimum peak separation will be 0.05*1000 = 50 elements.
max_number : int, None
Specify up to how many peaks will be returned. E.g. if 3 is passed in and 5 peaks are found, only the 3 largest
peaks will be returned.
fwxm_height: float
The relative height at which a FWXM calculation is performed. Although this function finds simple max values,
the underlying function can provide fwxm information as well.
min_width: int
The minimum width of the peak.
search_region: tuple
The search region to use within the values.
Using between 0 and 1 will convert to a ratio of the indices. E.g. to search the middle half of the passed values, use (0.25, 0.75).
Using ints above 1 will use the indices directly. E.g. (33, 71) will search between those two indices.
Returns
-------
peak_idxs : numpy.array
The indices of the peaks found.
peak_props : dict
A dict containing contextual peak data.
"""
peak_separation, shift_amount, threshold, trimmed_values = _parse_peak_args(peak_separation, search_region, threshold,
values)
peak_idxs, peak_props = signal.find_peaks(trimmed_values, rel_height=(1 - fwxm_height), width=min_width, height=threshold,
distance=peak_separation, prominence=required_prominence)
peak_idxs += shift_amount # shift according to the search region left edge
# get the "largest" peaks up to max number, and then re-sort to be left->right like it was originally
largest_peak_idxs = sorted(list(np.argsort(peak_props[peak_sort]))[::-1][:max_number])
# cut down prop arrays as need be
for key, array_vals in peak_props.items():
peak_props[key] = array_vals[largest_peak_idxs]
return peak_idxs[largest_peak_idxs], peak_props
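# Editor's note: illustrative doctest-style sketch (not part of the module) showing
# how the relative ``threshold`` filters out low peaks.
# >>> sig = np.array([0, 1, 2, 3, 2, 1, 0, 4, 8, 4, 0], dtype=float)
# >>> idxs, props = find_peaks(sig, threshold=0.5, max_number=1)
# >>> idxs  # only the tall peak clears 50% of the signal range
# array([8])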
def _parse_peak_args(peak_separation: NumberLike, search_region: Tuple[float, float], threshold: NumberLike,
values: np.ndarray) -> Tuple[NumberLike, int, NumberLike, np.ndarray]:
"""Converts arguments as needed. E.g. converting a ratio to actual values"""
# set threshold as % if between 0 and 1
val_range = values.max() - values.min()
if 0 <= threshold <= 1:
threshold = values.min() + threshold * val_range
# set separation as % if between 0 and 1
if 0 <= peak_separation <= 1:
peak_separation = max(int(peak_separation * len(values)), 1)
# limit to search region
if max(search_region) <= 1:
shift_amount = int(search_region[0] * len(values))
values = values[int(search_region[0] * len(values)):int(search_region[1] * len(values))]
else:
values = values[search_region[0]:search_region[1]]
shift_amount = search_region[0]
return peak_separation, shift_amount, threshold, values
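# Editor's note: hedged usage sketch, not part of pylinac. It builds a synthetic
# flat profile and reports its FWHM width; the numbers are illustrative only.
if __name__ == '__main__':
    demo_values = np.concatenate([np.zeros(20), np.ones(60), np.zeros(20)])
    demo_profile = SingleProfile(demo_values,
                                 interpolation=Interpolation.NONE,
                                 normalization_method=Normalization.MAX)
    demo_fwhm = demo_profile.fwxm_data(x=50)
    print('FWHM width (exact):', demo_fwhm['width (exact)'])  # ~60 samples for the plateau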
| jrkerns/pylinac | pylinac/core/profile.py | Python | mit | 53,985 | ["Gaussian"] | 1a615432a1bf113da6b49fad0f6e32588b7bb33630330c178a40ebe2e14ef00f |
# We only import libraries needed for plotting
# Other libraries are imported in the class definition file, G3D_class.py,
# which contains all process and variables function definition.
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import N3D_class
import G3D_class
# We instantiate an object of the class G3D, just by giving the path to the netcdf file to work with
# Up to now I'm working with 4D netcdf files containing several variables.
# Outputs from different files can be merged easily, as can be seen in other examples
N = N3D_class.N3D('OutNEMO/BS_1d_19900101_20071231_grid_T_199401-199401.nc','local_NEMO.yml')
G = G3D_class.G3D('OutGHER/r5.nc', 'local_GHER.yml')
# All loaded variables are attributes of the G3D instance.
# For example the variable "bat" is defined directly when the object is instantiated.
# Other are loaded only when needed.
# Variables are python masked arrays, so they have an attribute mask which is an array of booleans
# Here we want to define a mask based on bathymetry
NmaskDS= (N.bat<50 ) & ~(N.bat.mask) # Mask should be True where masked
NmaskSH= (N.bat>=50) & ~(N.bat.mask) # Mask should be True where masked
GmaskSH= (G.bat>=50) & ~(G.bat.mask)
GmaskDS= (G.bat<50) & ~(G.bat.mask)
# All processing functions are called as function of the G3D instance.
# The variable name is given as an argument. Some functions allow more arguments.
# This would give the basin-averaged time series of salinity
#T1 = N.avgspatial('SAL')
# The avgspatial function accepts an optional mask argument.
# Note also that we can use a variable name that is not defined in the netcdf file.
# In this case the toolbox will automatically look for the function "instance_SSS"
NsssDS=N.avgspatial('SSS',NmaskDS)
NsssSH=N.avgspatial('SSS',NmaskSH)
GsssDS=G.avgspatial('SSS',GmaskDS)
GsssSH=G.avgspatial('SSS',GmaskSH)
# The following is general python plotting.
# the "dates" attribute is also loaded automatically
####################
# 1st figure :
####################
locator = mdates.AutoDateLocator()
formator = mdates.AutoDateFormatter(locator)
fig=plt.figure(figsize=(15, 8))
ax=plt.subplot(1, 1, 1)
ax.xaxis_date()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formator)
plt.plot(N.dates,NsssSH, label = 'Average surface salinity on the shelf - NEMO', color='r',linestyle='-')
plt.plot(G.dates,GsssSH, label = 'Average surface salinity on the shelf - GHER', color='r',linestyle='--')
plt.plot(N.dates,NsssDS, label = 'Average surface salinity in the open sea - NEMO', color='b',linestyle='-')
plt.plot(G.dates,GsssDS, label = 'Average surface salinity in the open sea - GHER', color='b',linestyle='--')
plt.title('Sea Surface Salinity')
plt.ylabel('Practical Salinity - []')
plt.legend(loc='best')
fig.savefig(N.figoutputdir+'Simple.png')
| acapet/GHER-POSTPROC | Examples/EasyExample_NEMO.py | Python | gpl-3.0 | 3,415 | ["NetCDF"] | 25460f5334317ac28dfa294510e63a83d3d429e10942e6f3d7a0e53c860a687b |
import csv
import heapq
import numpy as np
from ase.tasks.io import read_json
ann_fontsize = 'small'
label_fontsize = 12
def most_common_value(values, precision=None):
import heapq
# find the most common value
if precision is not None:
vs = [round(v, precision) for v in values]
else:
vs = values[:]
if len(vs) > 0:
nlargest = heapq.nlargest(1, [vs.count(e) for e in vs])[0]
index = [vs.count(e) for e in vs].index(nlargest)
v = vs[index]
else:
v = None
return v
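# Editor's note: small illustrative doctest (not in the original file) of the
# round-then-vote behaviour; ties fall back to the first most common value.
# >>> most_common_value([1.04, 1.06, 1.12, 2.3], precision=1)
# 1.1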
def get_differences(reference, values, precision=None):
# differences with respect to the reference value within precision
values = values[:]
for n, v in enumerate(values):
if v is not None:
de = v - reference
if precision is not None:
if abs(de) < 1. / precision:
# value within precision
values[n] = 0.0
else:
values[n] = round(de, precision)
else:
values[n] = de
return values
def get_key_summary_list(key, name, runs, data, precision=3, relative=False):
# if relative, then first entry is a common value, other entries relative
# if not relative, then first entry is average value, other entries verbatim
nruns = len(runs)
d = data.copy()
l = [name]
values = [d[r][key] for r in runs if r in d and key in d[r] and d[r][key] is not None]
if relative:
# find the most common value
e = most_common_value(values, precision)
else:
e = np.mean(values)
if e is None:
l.extend(['None' for i in range(nruns + 1)])
else:
# print the found common value with the precision
l.append(("%." + "%df" % (precision + 0)) % e)
es = [d[r].get(key, None) for r in runs if r in d]
if relative:
des = get_differences(e, es)
# and the corresponding differences
for n, de in enumerate(des):
if de is not None:
if abs(de) < 1. / precision:
# value within precision
des[n] = '-'
else:
des[n] = ("%." + "%df" % precision) % de
else:
des[n] = 'None'
l.extend(des)
else:
des = es[:]
for n, de in enumerate(es):
if de is not None:
des[n] = str(de)
else:
des[n] = 'None'
l.extend(des)
return l
def get_key_stats(data, key, nnlargest):
keys = []
values = []
stats = {'absaverage': None,
'average': None,
'std': None,
'max': None,
'N': None,
'nlargest': []}
for k, v in data.iteritems():
if key in v:
if v.get(key) is not None:
keys.append(k)
values.append(v[key])
if len(keys) > 0:
stats['absaverage'] = np.average(abs(np.array(values)))
stats['average'] = np.average(np.array(values))
stats['std'] = np.std(np.array(values))
stats['max'] = np.max(np.array(values))
stats['N'] = len(values)
nlargest = heapq.nlargest(nnlargest, np.abs(np.array(values)))
# already found elements must be removed in order to avoid repetition
k = keys[:]
v = values[:]
nl = []
for i in nlargest:
ind = v.index(i)
nl.append((k[ind], i))
v.pop(ind)
k.pop(ind)
stats['nlargest'] = nl
return stats
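# Editor's note: hedged doctest-style sketch (not from the original source); the
# system names and values below are made up.
# >>> demo = {'H2O': {'relaxed energy': -0.2}, 'CO2': {'relaxed energy': 0.5}}
# >>> s = get_key_stats(demo, 'relaxed energy', nnlargest=1)
# >>> s['N'], s['nlargest']  # -> (2, [('CO2', 0.5)]): count and largest-|value| entry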
def plot_single(xdata, ydata, std,
title,
xlabel, ylabel,
label, color, alpha,
miny, maxy,
num=1,
):
import matplotlib
matplotlib.rc('text', usetex=False)
import pylab
import matplotlib.font_manager
# all goes to figure num
pylab.figure(num=num, figsize=(9.5, 9))
pylab.gca().set_position([0.10, 0.20, 0.85, 0.60])
# let the plot have fixed y-axis scale
ywindow = maxy - miny
pylab.bar(xdata, ydata, 0.9, yerr=std,
label=label, color=color, alpha=alpha)
pylab.gca().set_ylim(miny, maxy)
t = pylab.title(title)
# http://old.nabble.com/More-space-between-title-and-secondary-x-axis-td31722298.html
t.set_y(1.05)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
prop = matplotlib.font_manager.FontProperties(size=12)
leg = pylab.legend(loc='upper right', fancybox=True, prop=prop)
leg.get_frame().set_alpha(0.5)
def plot(runs, data,
labels, # labels coresponding to runs
key, # the main quantity to plot
failkey, # the key that informs about failure/success in run
nnlargest,
plot_label,
plot_xlabel,
plot_ylabel,
miny, maxy,
tag=None,
tunit='min'):
# horror function
d = data.copy()
# collect stats for each run
stats = {'average': [],
'std': [],
'sdom': [],
'nlargest': [],
'time': [],
'converged': []}
for n, r in enumerate(runs):
s = get_key_stats(d[r], key, nnlargest)
assert s['average'] is not None, r + ': no key ' + "'" + key + "'" + ' in results?'
stats['average'].append(s['average'])
stats['std'].append(s['std'])
stats['sdom'].append(s['std']/np.sqrt(s['N']))
stats['nlargest'].append(s['nlargest'])
# total run time
t = [d[r][k]['time'] for k in d[r].keys() if 'time' in d[r][k]]
if tunit == 'sec':
stats['time'].append(sum(t)) # sec
elif tunit == 'min':
stats['time'].append(sum(t) / 60) # min
elif tunit == 'h':
stats['time'].append(sum(t) / 3600) # hours
# number of converged systems
c = [d[r][k][failkey] for k in d[r].keys() if failkey in d[r][k]]
stats['converged'].append(len([s for s in c if s is not None]))
import matplotlib
#matplotlib.use('Agg')
matplotlib.rc('text', usetex=False)
from matplotlib import pylab, ticker
num=1
scale = [i for i in range(len(runs))]
# plot skeleton
plot_single(
scale, stats['average'], stats['sdom'],
plot_label,
plot_xlabel,
plot_ylabel,
'Average (avg)',
'blue',
0.5,
miny, maxy,
num=1,
)
zero = [0.0 for v in scale]
pylab.plot(scale, zero, 'k-', label='_nolegend_')
ay1=pylab.gca()
ay1.xaxis.set_ticks([n + 0.5 for n in scale])
ay1.xaxis.set_ticklabels(labels)
ay1.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
for label in ay1.get_xticklabels() + ay1.get_yticklabels():
label.set_fontsize(label_fontsize)
# rotate labels http://old.nabble.com/Rotate-x-axes-%28xticks%29-text.-td3141258.html
for label in ay1.get_xticklabels():
label.set_rotation(75)
# plot stats
for n, r in enumerate(runs):
label = ''
for n1, v in enumerate(['average', 'std', 'time']):
l = {
'average': '\navg\n',
'absaverage': '\nabs\n',
'std': '\nstd\n',
'time': '\nt [%s]\n' % tunit,
}[v]
value = {
'average': stats['average'][n],
#'absaverage': stats['absaverage'][n],
'std': stats['std'][n],
'time': stats['time'][n],
}[v]
label += l + ' ' + str(round(value, 1)) + '\n'
label += '\nconv.\n' + ' ' + str(int(stats['converged'][n])) + '\n'
pylab.annotate(label,
xy=(n + 0.0, 0.0),
xytext=(n + 0.2, miny / 2 - 0.5),
arrowprops=None,
horizontalalignment='left',
verticalalignment='center',
fontsize=ann_fontsize,
)
# plot compounds with largest errors
for n, l in enumerate(stats['nlargest']):
l.reverse()
for n1, (c, e) in enumerate(l):
name = c + '\n'
label = name + ' ' + str(int(e))
xytext=(n + 0.3, maxy / 2 + maxy / 10 * n1)
if e > maxy:
# labels exceeding y-scale
xy = xytext
else:
xy=(n + 0.05, e)
pylab.annotate(label,
xy=xy,
xytext=xytext,
arrowprops=dict(width=0.05,
headwidth=5.0,
facecolor='black',
shrink=1.00),
horizontalalignment='left',
verticalalignment='center',
fontsize=ann_fontsize,
)
# axis limits after the last plot!
pylab.ylim(miny, maxy)
kname = key.replace(' ', '_')
if tag is not None:
plotname = '%s_%s.png' % (tag, kname)
else:
plotname = '%s.png' % kname
pylab.savefig(plotname, bbox_inches='tight')
class AnalyseOptimizersTask:
def __init__(self, taskname, runs,
labels=None, tag=None, steps=100, precision=3, tunit='min'):
"""Analyse optimizers runs. """
self.taskname = taskname
self.runs = runs
if labels is None: # run labels for plotting
self.labels = self.runs.split(',')
else:
self.labels = labels
self.tag = tag
        self.steps = steps  # used only to set the plot y-axis limits
self.precision = precision
assert tunit in ['sec', 'min', 'h']
self.tunit = tunit
self.key_summary = 'relaxed energy'
self.key_plot = 'optimizer force calls'
self.plot_label = 'Summary of optimizers'
self.plot_xlabel = 'optimizer'
self.plot_ylabel = self.key_plot
def read_run(self, taskname, tag, run):
if tag is not None:
tag += '_%s' % run
else:
tag = run
return read_json(taskname + '-' + tag + '.json')
def get_data(self, taskname, tag, runs):
d = {}
for r in runs:
d[r] = self.read_run(taskname, tag, r)
return d
def analyse(self):
runs = self.runs.split(',')
# dict of systems for each run
datarun = self.get_data(self.taskname, self.tag, runs)
# the number of systems (based on results from json)
nsystems = max([len(datarun[n]) for n in datarun.keys()])
# dict of runs for each system
datasys = {}
for k, v in datarun.iteritems():
for k1, v1 in v.iteritems():
if k1 not in datasys:
datasys[k1] = {k: v1}
else:
datasys[k1].update({k: v1})
# csv summary of self.key_plot
key_name = self.key_plot.replace(' ', '_')
row = ['formula', self.key_plot]
row.extend([r for r in range(len(runs))])
rows = [row]
for name, data in datasys.items():
if not data:
continue
row = get_key_summary_list(self.key_plot,
name,
runs,
data,
precision=self.precision,
relative=False)
row = [r.replace('None', 'N/A') for r in row]
# only failed or non-common runs
for k in row[2:]:
if k == 'N/A' or k != '-':
rows.append(row)
break
if len(rows) > 0: # always create csv file
if self.tag is not None:
csvwriter = csv.writer(
open('%s_%s.csv' % (self.tag, key_name), 'wb'))
else:
csvwriter = csv.writer(open('%s.csv' % key_name, 'wb'))
for r in rows:
csvwriter.writerow(r)
# csv summary of self.key_summary
summary_name = self.key_summary.replace(' ', '_')
row = ['formula', self.key_summary]
row.extend([r for r in range(len(runs))])
rows = [row]
for name, data in datasys.items():
if not data:
continue
row = get_key_summary_list(self.key_summary,
name,
runs,
data,
precision=self.precision,
relative=True)
row = [r.replace('None', 'N/A') for r in row]
# only failed or non-common runs
for k in row[2:]:
if k == 'N/A' or k != '-':
rows.append(row)
break
if len(rows) > 0: # always create csv file
if self.tag is not None:
csvwriter = csv.writer(
open('%s_%s.csv' % (self.tag, summary_name), 'wb'))
else:
csvwriter = csv.writer(open('%s.csv' % summary_name, 'wb'))
for r in rows:
csvwriter.writerow(r)
# plot
maxy = self.steps - 5
miny = - ((maxy + 5) / 2 + 5)
# plot only runs with data
plotruns = runs[:]
labels = self.labels[:]
for n, r in enumerate(runs):
nd = [1 for k in datarun[r] if datarun[r][k]]
if len(nd) == 0:
print('skipped plotting empty data', r)
# no data for this run
i = plotruns.index(r)
plotruns.pop(i)
labels.pop(i)
continue
plot(plotruns, datarun,
labels,
self.key_plot, # the main quantity to plot
self.key_summary,
4,
self.plot_label + ': ' + str(nsystems) + ' systems',
self.plot_xlabel,
self.plot_ylabel,
miny, maxy,
tag=self.tag,
tunit=self.tunit)
class AnalyseSCFTask(AnalyseOptimizersTask):
def __init__(self, taskname, runs,
labels=None, tag=None, steps=100, precision=3, tunit='min'):
"""Analyse SCF runs. """
AnalyseOptimizersTask.__init__(self, taskname, runs,
labels, tag, steps, precision, tunit)
self.key_summary = 'energy'
self.key_plot = 'calculator steps'
self.plot_label = 'Summary of runs'
self.plot_xlabel = 'run'
self.plot_ylabel = self.key_plot
|
suttond/MODOI
|
ase/test/tasks/analyse.py
|
Python
|
lgpl-3.0
| 14,864
|
[
"ASE"
] |
86bf563d491deaa2e2132ca7f99bb2c85705a0380a78d111d19aa1f706cee115
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Non-relativistic UKS analytical nuclear gradients'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rks as rks_grad
from pyscf.grad import uhf as uhf_grad
from pyscf.dft import numint, gen_grid
from pyscf import __config__
def get_veff(ks_grad, mol=None, dm=None):
'''
First order derivative of DFT effective potential matrix (wrt electron coordinates)
Args:
ks_grad : grad.uhf.Gradients or grad.uks.Gradients object
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (logger.process_clock(), logger.perf_counter())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
if mf.nlc != '':
raise NotImplementedError
#enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
if ks_grad.grid_response:
exc, vxc = get_vxc_full_response(ni, mol, grids, mf.xc, dm,
max_memory=max_memory,
verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = get_vxc(ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10:
vj = ks_grad.get_j(mol, dm)
vxc += vj[0] + vj[1]
else:
vj, vk = ks_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10: # For range separated Coulomb operator
with mol.with_range_coulomb(omega):
vk += ks_grad.get_k(mol, dm) * (alpha - hyb)
vxc += vj[0] + vj[1] - vk
return lib.tag_array(vxc, exc1_grid=exc)
def get_vxc(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1,
verbose=verbose)[1]
vrho = vxc[0]
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vmat[0], mol, ao[1:4], aow, mask, ao_loc, True)
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vmat[1], mol, ao[1:4], aow, mask, ao_loc, True)
vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1,
verbose=verbose)[1]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
rks_grad._gga_grad_sum_(vmat[0], mol, ao, wva, mask, ao_loc)
rks_grad._gga_grad_sum_(vmat[1], mol, ao, wvb, mask, ao_loc)
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
elif xctype == 'MGGA':
raise NotImplementedError('meta-GGA')
exc = numpy.zeros((mol.natm,3))
# - sign because nabla_X = -nabla_x
return exc, -vmat
def get_vxc_full_response(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Full response including the response of the grids'''
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
aoslices = mol.aoslice_by_atom()
excsum = 0
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1,
verbose=verbose)[:2]
vrho = vxc[0]
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[0] += vtmp
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a+rho_b, weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1,
verbose=verbose)[:2]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wva, mask, ao_loc)
vmat[0] += vtmp
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a[0]+rho_b[0], weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wvb, mask, ao_loc)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
elif xctype == 'MGGA':
raise NotImplementedError('meta-GGA')
# - sign because nabla_X = -nabla_x
return excsum, -vmat
class Gradients(uhf_grad.Gradients):
grid_response = getattr(__config__, 'grad_uks_Gradients_grid_response', False)
def __init__(self, mf):
uhf_grad.Gradients.__init__(self, mf)
self.grids = None
self.grid_response = False
self._keys = self._keys.union(['grid_response', 'grids'])
def dump_flags(self, verbose=None):
uhf_grad.Gradients.dump_flags(self, verbose)
logger.info(self, 'grid_response = %s', self.grid_response)
return self
get_veff = get_veff
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
if self.grid_response:
vhf = envs['vhf']
log = envs['log']
log.debug('grids response for atom %d %s',
atom_id, vhf.exc1_grid[atom_id])
return vhf.exc1_grid[atom_id]
else:
return 0
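# --- Illustrative sketch (not part of the original module) ----------------------
# A hypothetical subclass showing how the extra_force() hook described above can
# be used to add a custom per-atom contribution; the constant shift below is made
# up purely for demonstration.
class _GradientsWithExtraForce(Gradients):
    def extra_force(self, atom_id, envs):
        base = Gradients.extra_force(self, atom_id, envs)
        # add a tiny, fictitious constant force along z on every atom
        return base + numpy.array([0.0, 0.0, 1e-6])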
Grad = Gradients
from pyscf import dft
dft.uks.UKS.Gradients = dft.uks_symm.UKS.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.charge = 1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-12
#mf.grids.atom_grid = (20,86)
e0 = mf.scf()
g = mf.Gradients()
print(lib.finger(g.kernel()) - -0.12090786243525126)
#[[-5.23195019e-16 -5.70291415e-16 5.32918387e-02]
# [ 1.33417513e-16 6.75277008e-02 -2.66519852e-02]
# [ 1.72274651e-16 -6.75277008e-02 -2.66519852e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.12091122429043633)
#[[-2.95956939e-16 -4.22275612e-16 5.32998759e-02]
# [ 1.34532051e-16 6.75279140e-02 -2.66499379e-02]
# [ 1.68146089e-16 -6.75279140e-02 -2.66499379e-02]]
mf.xc = 'b88,p86'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.11509739136150157)
#[[ 2.58483362e-16 5.82369026e-16 5.17616036e-02]
# [-5.46977470e-17 6.39273304e-02 -2.58849008e-02]
# [ 5.58302713e-17 -6.39273304e-02 -2.58849008e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.11507986316077731)
mf.xc = 'b3lypg'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.10202554999695367)
#[[ 6.47874920e-16 -2.75292214e-16 3.97215970e-02]
# [-6.60278148e-17 5.87909340e-02 -1.98650384e-02]
# [ 6.75500259e-18 -5.87909340e-02 -1.98650384e-02]]
mol = gto.Mole()
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0. )], ]
mol.unit = 'B'
mol.basis = '631g'
mol.charge = -1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365160440876001)
# sum over z direction non-zero, due to meshgrid response
# H -0.0000000000 0.0000000000 -0.1481125370
# F -0.0000000000 0.0000000000 0.1481164667
mf = dft.UKS(mol)
mf.grids.prune = None
mf.grids.level = 6
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365040148752827)
# H 0.0000000000 0.0000000000 -0.1481124925
# F -0.0000000000 0.0000000000 0.1481122913
|
sunqm/pyscf
|
pyscf/grad/uks.py
|
Python
|
apache-2.0
| 11,261
|
[
"PySCF"
] |
cdac03c2803fc929ec2d3e4e6a776019ce51ddb4e0dd5104356e16ef608363af
|
"""Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
from . import types as t
from .util import (
display,
ApplicationError,
)
from .data import (
data_context,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of module_utils names mapped to sets of python file paths.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))
if import_path not in imports_by_target_path:
import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix
else:
prefix = 'ansible.module_utils.'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if path == os.path.join(data_context().content.module_utils_path, '__init__.py'):
continue
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
with open(path, 'r') as module_fd:
code = module_fd.read()
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
for alias in node.names:
if alias.name.startswith('ansible.module_utils.'):
# import ansible.module_utils.MODULE[.MODULE]
self.add_import(alias.name, node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
if node.module == 'ansible.module_utils' or node.module.startswith('ansible.module_utils.'):
for alias in node.names:
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_import('%s.%s' % (node.module, alias.name), node.lineno)
def add_import(self, name, line_number):
"""
:type name: str
:type line_number: int
"""
import_name = name
while len(name) > len('ansible.module_utils.'):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if self.path.startswith('test/'):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
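# --- Illustrative usage sketch (not part of the original module) ----------------
# A made-up example of how ModuleUtilFinder walks a parsed source snippet. The
# source text, the fake module path and the module_utils set below are all
# hypothetical; real callers go through extract_python_module_utils_imports().
def _demo_module_util_finder():
    source = (
        "from ansible.module_utils.basic import AnsibleModule\n"
        "import ansible.module_utils.urls\n"
    )
    known = set(['ansible.module_utils.basic', 'ansible.module_utils.urls'])
    finder = ModuleUtilFinder('lib/ansible/modules/demo_module.py', known)
    finder.visit(ast.parse(source))
    return finder.imports  # == known for this snippet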
|
amenonsen/ansible
|
test/lib/ansible_test/_internal/import_analysis.py
|
Python
|
gpl-3.0
| 8,975
|
[
"VisIt"
] |
b8e6b9b184ce5b9b03135e2d6801267641ec69d9be9c8e76cc47d49cd3384a9a
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""
Demo #6
The sixth script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script uses real galaxy images from COSMOS observations. The catalog of real galaxy
images distributed with GalSim only includes 100 galaxies, but you can download a much
larger set of images. See https://github.com/GalSim-developers/GalSim/wiki for a link
to the download page.
The galaxy images include images of the effective PSF for the original observations,
so GalSim considers the galaxy profile to be the observed image deconvolved by that PSF.
In this case, we then randomly rotate the galaxies, apply a given gravitational shear as
well as gravitational magnification, and then finally convolve by a double Gaussian PSF.
The final image can of course have any pixel scale, not just that of the original images.
The output for this script is to a FITS "data cube". With DS9, this can be viewed with a
slider to quickly move through the different images.
New features introduced in this demo:
- real_cat = galsim.RealGalaxyCatalog(file_name, dir)
- real_cat.preload()
- obj = galsim.Gaussian(fwhm, flux)
- obj = galsim.RealGalaxy(real_cat, index)
- obj.applyRotation(theta)
- obj.applyMagnification(mu)
- image += background
- noise = galsim.PoissonNoise() # with no sky_level given
- obj.draw(..., offset)
- galsim.fits.writeCube([list of images], file_name)
"""
import sys
import os
import math
import numpy
import logging
import time
import galsim
def main(argv):
"""
Make a fits image cube using real COSMOS galaxies from a catalog describing the training
sample.
- The number of images in the cube matches the number of rows in the catalog.
- Each image size is computed automatically by GalSim based on the Nyquist size.
- Both galaxies and stars.
- PSF is a double Gaussian, the same for each galaxy.
- Galaxies are randomly rotated to remove the imprint of any lensing shears in the COSMOS
data.
- The same shear is applied to each galaxy.
- Noise is Poisson using a nominal sky value of 1.e6 ADU/arcsec^2,
the noise in the original COSMOS data.
"""
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("demo6")
# Define some parameters we'll use below.
cat_file_name = 'real_galaxy_catalog_example.fits'
# This script is designed to be run from the examples directory so dir is a relative path.
# But the '../examples/' part lets bin/demo6 also be run from the bin directory.
dir = '../examples/data'
# Make output directory if not already present.
if not os.path.isdir('output'):
os.mkdir('output')
cube_file_name = os.path.join('output','cube_real.fits')
psf_file_name = os.path.join('output','psf_real.fits')
random_seed = 1512413
sky_level = 1.e6 # ADU / arcsec^2
pixel_scale = 0.15 # arcsec
gal_flux = 1.e5 # arbitrary choice, makes nice (not too) noisy images
gal_g1 = -0.027 #
gal_g2 = 0.031 #
gal_mu = 1.082 # mu = ( (1-kappa)^2 - g1^2 - g2^2 )^-1
psf_inner_fwhm = 0.6 # arcsec
psf_outer_fwhm = 2.3 # arcsec
psf_inner_fraction = 0.8 # fraction of total PSF flux in the inner Gaussian
    psf_outer_fraction = 0.2     # fraction of total PSF flux in the outer Gaussian
ngal = 100
logger.info('Starting demo script 6 using:')
logger.info(' - real galaxies from catalog %r',cat_file_name)
logger.info(' - double Gaussian PSF')
logger.info(' - pixel scale = %.2f',pixel_scale)
logger.info(' - Applied gravitational shear = (%.3f,%.3f)',gal_g1,gal_g2)
logger.info(' - Poisson noise (sky level = %.1e).', sky_level)
# Read in galaxy catalog
# Note: dir is the directory both for the catalog itself and also the directory prefix
# for the image files listed in the catalog.
# If the images are in a different directory, you may also specify image_dir, which gives
# the relative path from dir to wherever the images are located.
real_galaxy_catalog = galsim.RealGalaxyCatalog(cat_file_name, dir=dir)
# Preloading the header information usually speeds up subsequent access.
# Basically, it tells pyfits to read all the headers in once and save them, rather
# than re-open the galaxy catalog fits file each time you want to access a new galaxy.
# If you are doing more than a few galaxies, then it seems to be worthwhile.
real_galaxy_catalog.preload()
logger.info('Read in %d real galaxies from catalog', real_galaxy_catalog.nobjects)
# Make the ePSF
# first make the double Gaussian PSF
psf1 = galsim.Gaussian(fwhm = psf_inner_fwhm, flux = psf_inner_fraction)
psf2 = galsim.Gaussian(fwhm = psf_outer_fwhm, flux = psf_outer_fraction)
psf = psf1+psf2
# make the pixel response
pix = galsim.Pixel(pixel_scale)
# convolve PSF and pixel response function to get the effective PSF (ePSF)
epsf = galsim.Convolve([psf, pix])
# Draw this one with no noise.
epsf_image = epsf.draw(dx = pixel_scale)
# write to file
epsf_image.write(psf_file_name)
logger.info('Created ePSF and wrote to file %r',psf_file_name)
# Build the images
all_images = []
for k in range(ngal):
logger.debug('Start work on image %d',k)
t1 = time.time()
# Initialize the random number generator we will be using.
rng = galsim.UniformDeviate(random_seed+k)
gal = galsim.RealGalaxy(real_galaxy_catalog, index = k)
logger.debug(' Read in training sample galaxy and PSF from file')
t2 = time.time()
# Set the flux
gal.setFlux(gal_flux)
# Rotate by a random angle
theta = 2.*math.pi * rng() * galsim.radians
gal.applyRotation(theta)
# Apply the desired shear
gal.applyShear(g1=gal_g1, g2=gal_g2)
# Also apply a magnification mu = ( (1-kappa)^2 - |gamma|^2 )^-1
# This conserves surface brightness, so it scales both the area and flux.
gal.applyMagnification(gal_mu)
# Make the combined profile
final = galsim.Convolve([psf, pix, gal])
# Offset by up to 1/2 pixel in each direction
# We had previously (in demo4 and demo5) used applyShift(dx,dy) as a way to shift the
# center of the image. Since that is applied to the galaxy, the units are arcsec (since
# the galaxy profile itself doesn't know about the pixel scale). Here, the offset applies
# to the drawn image, which does know about the pixel scale, so the units of offset are
# pixels, not arcsec. Here, we apply an offset of up to half a pixel in each direction.
dx = rng() - 0.5
dy = rng() - 0.5
# Draw the profile
if k == 0:
# Note that the offset argument may be a galsim.PositionD object or a tuple (dx,dy).
im = final.draw(dx=pixel_scale, offset=(dx,dy))
xsize, ysize = im.array.shape
else:
im = galsim.ImageF(xsize,ysize)
final.draw(im, dx=pixel_scale, offset=(dx,dy))
logger.debug(' Drew image')
t3 = time.time()
# Add a constant background level
background = sky_level * pixel_scale**2
im += background
# Add Poisson noise. This time, we don't give a sky_level, since we have already
# added it to the image, so we don't want any more added. The sky_level parameter
# really defines how much _extra_ sky should be added above what is already in the image.
im.addNoise(galsim.PoissonNoise(rng))
logger.debug(' Added Poisson noise')
t4 = time.time()
# Store that into the list of all images
all_images += [im]
t5 = time.time()
logger.debug(' Times: %f, %f, %f, %f',t2-t1, t3-t2, t4-t3, t5-t4)
logger.info('Image %d: size = %d x %d, total time = %f sec', k, xsize, ysize, t5-t1)
logger.info('Done making images of galaxies')
# Now write the image to disk.
# We write the images to a fits data cube.
galsim.fits.writeCube(all_images, cube_file_name)
logger.info('Wrote image to fits data cube %r',cube_file_name)
if __name__ == "__main__":
main(sys.argv)
|
mardom/GalSim
|
examples/demo6.py
|
Python
|
gpl-3.0
| 9,206
|
[
"Galaxy",
"Gaussian"
] |
7c09d9d4f68a7f12f22e361f4cd949ee176ea743c2a524ea35bfebcd99d6d92b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# upload - Plain and efficient file upload back end
# Copyright (C) 2003-2012 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Plain file upload back end"""
import os
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables
from shared.parseflags import verbose
from shared.validstring import valid_user_path
block_size = 1024 * 1024
def signature():
"""Signature of the main function"""
defaults = {
'flags': [''],
'path': REJECT_UNSET,
'fileupload': REJECT_UNSET,
'restrict': [False],
}
return ['html_form', defaults]
def write_chunks(path, file_obj, restrict):
"""Write file_obj bytes to path and set strict permissions if restrict
is set. Removes file if upload fails for some reason.
"""
try:
upload_fd = open(path, 'wb')
while True:
chunk = file_obj.read(block_size)
if not chunk:
break
upload_fd.write(chunk)
upload_fd.close()
if restrict:
os.chmod(path, 0600)
return True
except Exception, exc:
os.remove(path)
raise exc
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
client_dir = client_id_dir(client_id)
status = returnvalues.OK
defaults = signature()[1]
# IMPORTANT: the CGI front end forces the input extraction to be delayed
# We must manually extract and parse input here to avoid memory explosion
# for huge files!
# TODO: explosions still happen sometimes!
# Most likely because of Apache SSL renegotiations which have
# no other way of storing input
extract_input = user_arguments_dict['__DELAYED_INPUT__']
logger.info('Extracting input in %s' % op_name)
form = extract_input()
logger.info('After extracting input in %s' % op_name)
file_item = None
file_name = ''
user_arguments_dict = {}
if form.has_key('fileupload'):
file_item = form['fileupload']
file_name = file_item.filename
user_arguments_dict['fileupload'] = ['true']
user_arguments_dict['path'] = [file_name]
if form.has_key('path'):
user_arguments_dict['path'] = [form['path'].value]
if form.has_key('restrict'):
user_arguments_dict['restrict'] = [form['restrict'].value]
else:
user_arguments_dict['restrict'] = defaults['restrict']
logger.info('Filtered input is: %s' % user_arguments_dict)
# Now validate parts as usual
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
flags = ''.join(accepted['flags'])
path = accepted['path'][-1]
restrict = accepted['restrict'][-1]
if not configuration.site_enable_griddk:
output_objects.append({'object_type': 'text', 'text':
'''Grid.dk features are disabled on this site.
Please contact the Grid admins %s if you think they should be enabled.
''' % configuration.admin_email})
return (output_objects, returnvalues.OK)
logger.info('Filtered input validated with result: %s' % accepted)
    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when our own name is a prefix of another user name (see the
    # illustrative sketch after main below)
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
if verbose(flags):
for flag in flags:
output_objects.append({'object_type': 'text', 'text'
: '%s using flag: %s' % (op_name,
flag)})
output_objects.append({'object_type': 'header', 'text'
: 'Uploading file'})
# Check directory traversal attempts before actual handling to avoid
# leaking information about file system layout while allowing consistent
# error messages
real_path = os.path.realpath(os.path.join(base_dir, path))
# Implicit destination
if os.path.isdir(real_path):
real_path = os.path.join(real_path, os.path.basename(file_name))
if not valid_user_path(real_path, base_dir, True):
logger.warning('%s tried to %s restricted path %s ! (%s)'
% (client_id, op_name, real_path, path))
output_objects.append(
{'object_type': 'error_text', 'text'
: "Invalid destination (%s expands to an illegal path)" % path})
return (output_objects, returnvalues.CLIENT_ERROR)
if not os.path.isdir(os.path.dirname(real_path)):
output_objects.append({'object_type': 'error_text', 'text'
: "cannot write: no such file or directory: %s)"
% path})
return (output_objects, returnvalues.CLIENT_ERROR)
# We fork off here and redirect the user to a progress page for user
# friendly output and to avoid cgi timeouts from killing the upload.
# We use something like the Active State python recipe for daemonizing
# to properly detach from the CGI process and continue in the background.
# Please note that we only close stdio file descriptors to avoid closing
# the fileupload.
file_item.file.seek(0, 2)
total_size = file_item.file.tell()
file_item.file.seek(0, 0)
try:
pid = os.fork()
if pid == 0:
os.setsid()
pid = os.fork()
if pid == 0:
os.chdir('/')
os.umask(0)
for fno in range(3):
try:
os.close(fno)
except OSError:
pass
else:
os._exit(0)
except OSError, ose:
output_objects.append({'object_type': 'error_text', 'text'
: '%s upload could not background! (%s)'
% (path, str(ose).replace(base_dir, ''
))})
return (output_objects, returnvalues.SYSTEM_ERROR)
# The detached grand child takes care of writing and the original process
# redirects to progress page
if pid == 0:
try:
write_chunks(real_path, file_item.file, restrict)
except Exception, exc:
pass
else:
output_objects.append({'object_type': 'text', 'text'
: 'Upload of %s in progress' % path})
progress_link = {'object_type': 'link', 'text': 'show progress',
'destination': 'uploadprogress.py?path=%s;size=%d'
% (path, total_size)}
output_objects.append(progress_link)
return (output_objects, status)
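# --- Illustrative sketch (not part of the original back end) --------------------
# It only demonstrates why base_dir must end in a separator when prefix-matching
# user paths (see the note above the base_dir assignment in main). The user names
# are made up; the real check is done by valid_user_path() from shared.validstring.
def _demo_prefix_check():
    other = os.path.join('/home', 'user12', 'private.txt')
    base_without_sep = os.path.join('/home', 'user1')
    # Without the trailing separator, a sibling user whose name merely shares the
    # prefix would be accepted; with it, the comparison behaves as intended.
    assert other.startswith(base_without_sep)
    assert not other.startswith(base_without_sep + os.sep)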
|
heromod/migrid
|
mig/shared/functionality/upload.py
|
Python
|
gpl-2.0
| 8,294
|
[
"Brian"
] |
d02dd8dae12413090518d3f33aceabe96be9f49c2c19bcf5b06319dd87bc96c8
|
#!/usr/bin/python
import sys
import numpy as np
from scipy import linalg as li
from numpy import float_
from numpy import absolute as abs
from numpy import random as ran
import matplotlib
from scipy.signal.signaltools import convolve2d
from scipy.interpolate.interpolate import interp1d
import ssp_Hector_Fit3D_my as my
def linear_least_squares(a, b, residuals=False):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : (M,) array_like
Ordinate or "dependent variable" values.
residuals : bool
Compute the residuals associated with the least-squares solution
Returns
-------
x : (M,) ndarray
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residuals : int (Optional)
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
"""
# Copyright (c) 2013 Alexandre Drouin. All rights reserved.
# From https://gist.github.com/aldro61/5889795
from warnings import warn
# from scipy.linalg.fblas import dgemm
from scipy.linalg.blas import dgemm
# if type(a) != np.ndarray or not a.flags['C_CONTIGUOUS']:
# warn('Matrix a is not a C-contiguous numpy array. The solver will create a copy, which will result' + \
# ' in increased memory usage.')
a = np.asarray(a, order='c')
i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True)
x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b)).flatten()
if residuals:
return x, np.linalg.norm(np.dot(a, x) - b)
else:
return x
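# --- Illustrative usage sketch (not part of the original module) ----------------
# A minimal, made-up example of linear_least_squares() above: it fits a straight
# line to three points and checks the result against numpy's reference solver.
# Everything below (array values, the helper name) is hypothetical demonstration code.
def _demo_linear_least_squares():
    a = np.array([[1.0, 1.0],
                  [1.0, 2.0],
                  [1.0, 3.0]])          # design matrix: intercept column + x values
    b = np.array([1.0, 2.0, 2.0])       # observations
    x = linear_least_squares(a, b)      # normal-equations solution via BLAS dgemm
    x_ref = np.linalg.lstsq(a, b)[0]    # reference solution
    assert np.allclose(x, x_ref)
    return x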
def print_time():
import time
[year,mon,mday,hour,min,sec,wday,yday,isdst] = time.localtime()
print "# TIME "+str(sec)+" "+str(min)+" "+str(hour)+" "+str(mday)+" "+str(mon)+" "+str(year)+" "+str(wday)+" "+str(yday)+" "+str(isdst)
sec_now=hour*3600.+min*60.+sec
return sec_now
def get_seconds():
import time
[year,mon,mday,hour,min,sec,wday,yday,isdst] = time.localtime()
sec_now=yday*3600.*24.+hour*3600.+min*60.+sec
return sec_now
def get_time():
import time
[year,mon,mday,hour,min,sec,wday,yday,isdst] = time.localtime()
time="# TIME "+str(sec)+" "+str(min)+" "+str(hour)+" "+str(mday)+" "+str(mon)+" "+str(year)+" "+str(wday)+" "+str(yday)+" "+str(isdst)
return time
def linfit1d(pdl_flux, pdl_model_n, weigh=[1]):
nx, ny = pdl_model_n.shape
pdl_flux = np.array([pdl_flux])
if nx < ny:
pdl_model_n = np.transpose(pdl_model_n)
nx = ny
pdl_flux_m = pdl_flux/np.mean(pdl_flux)
A = pdl_model_n
B = pdl_flux_m
    if len(weigh) == nx:  # definition of the weights
weigh = np.diag(weigh)
A = np.dot(weigh,A)
B = np.dot(weigh,np.transpose(B))
else:
B = np.transpose(B)
coeffs_0 = np.dot(np.linalg.inv(np.dot(A.T, A)),np.dot(A.T, B))*np.mean(pdl_flux)
pdl_model_0 = np.dot(A,coeffs_0)
return [pdl_model_0,coeffs_0]
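# --- Illustrative usage sketch (not part of the original module) ----------------
# linfit1d() above solves the (optionally weighted) normal equations on the
# mean-normalised flux and rescales the coefficients back, so a flux that is an
# exact linear combination of the templates should be reproduced exactly.
# The toy spectra below are made up purely for demonstration.
def _demo_linfit1d():
    flux = np.array([1.0, 2.0, 3.0, 4.0])
    templates = np.array([[1.0, 1.0, 1.0, 1.0],     # flat template
                          [0.0, 1.0, 2.0, 3.0]])    # linear ramp template
    model, coeffs = linfit1d(flux, templates, np.ones(len(flux)))
    # flux = 1*flat + 1*ramp, so the reconstruction matches the input flux
    assert np.allclose(model[:, 0], flux)
    assert np.allclose(coeffs[:, 0], [1.0, 1.0])
    return coeffs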
def plot_results(plot,pdl_wave,pdl_output,output_name,title):
wave_now=pdl_wave
if plot > 0 :
if plot == 1:
dev_plot="null"
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
if plot > 1:
if plot == 2:
dev_plot=output_name+".pdf"
if plot == 3:
dev_plot=output_name+".png"
if plot > 3:
dev_plot=output_name+".jpg"
stats0=np.mean(pdl_output[:][0])
stats1=np.sqrt(np.sum((pdl_output[:][0]-stats0)**2)/(len(pdl_output[:][0])-1))
y_min=-0.5*stats0-0.75*stats1
y_max=2*stats0+6*stats1
nx=len(wave_now)
ny=len(pdl_output)
min_wave=wave_now[0]
max_wave=wave_now[nx-2]
fig = plt.figure()
plt.axis([min_wave,max_wave,y_min,y_max])
plt.xlabel("Wavelength",fontsize=14)
plt.ylabel("Flux",fontsize=14)
plt.title(title,fontsize=15)
jet = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=ny-1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
for j in range(0, ny):
flux=pdl_output[:][j]
color_val = scalarMap.to_rgba(j)
plt.plot(wave_now,flux,color=color_val)
if dev_plot == "null":
plt.show()
plt.close()
else:
plt.savefig(dev_plot)
plt.close()
def plot_results_min_max(plot,pdl_wave,pdl_output,output_name,title,y_min,y_max):
wave_now=pdl_wave
if plot > 0 :
if plot == 1:
dev_plot="null"
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
if plot > 1:
if plot == 2:
dev_plot=output_name+".pdf"
if plot == 3:
dev_plot=output_name+".png"
if plot > 3:
dev_plot=output_name+".jpg"
nx=len(wave_now)
ny=len(pdl_output)
min_wave=wave_now[0]
max_wave=wave_now[nx-2]
fig = plt.figure()
plt.axis([min_wave,max_wave,y_min,y_max])
plt.xlabel("Wavelength",fontsize=14)
plt.ylabel("Flux",fontsize=14)
plt.title(title,fontsize=15)
jet = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=ny-1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
for j in range(0, ny):
flux=pdl_output[:][j]
color_val = scalarMap.to_rgba(j)
plt.plot(wave_now,flux,color=color_val)
if dev_plot == "null":
plt.show()
plt.close()
else:
plt.savefig(dev_plot)
plt.close()
def smooth_ratio(pdl_data,pdl_model_spec_min,sigma):
pdl_model_spec_min[np.where(pdl_model_spec_min == 0)[0]]=1.
pdl_rat=pdl_data/pdl_model_spec_min
pdl_rat[np.isnan(pdl_rat)]=1
if sigma < 1 :
sigma=1
smooth_ratio=my.median_filter(int(7*2.354*sigma),pdl_rat)
smooth_ratio[np.isnan(smooth_ratio)]=1
return smooth_ratio
def fit_ssp_lin_no_zero_mine(redshift,sigma,Av_NOW,crval,cdelt,crpix,nf,n_c,pdl_flux_c_ini,hdr_c_ini,wave_unc,masked,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,MIN_CHISQ,plot):
#Defining variables
Av = Av_NOW
iter_max = 5
last_chi = 1e12
n_unc = len(flux_unc)
pdl_model_spec_min = np.zeros(n_unc)
flux_masked = np.zeros(n_unc)
coeffs = np.zeros([nf,3])
coeffs_N = np.zeros([nf,1])
coeffs_NM = np.zeros([nf,1])
coeffs_cat = np.zeros([nf+1, n_mc * n_mc])
pdl_model_spec_cat = np.zeros([n_unc, n_mc * n_mc])
wave_c = []
dpix_c_val = []
name = []
age_mod = []
met_mod = []
ml = []
flux_c = []
model = np.zeros([n_unc,nf])
model_no_mask = np.zeros([n_unc,nf])
age_min = 0
met_min = 0
med_norm = 1
Av_min = 0
age_min_mass = 0
met_min_mass = 0
Av_min_mass = 0
    print 'This nf'
    # Mask the flux and set the minimum Av to zero (why would it not be zero at this point?)
for i in range(0, n_unc):
flux_masked[i]=flux_unc[i]*masked[i]
for i in range(0, nf):
if Av[i] < 0:
Av[i]=0
    #---- Starting the non-linear fitting
    # Build a wavelength vector shifted by the redshift
for j in range(0, n_c):
wave_c.extend([(crval+cdelt*(j+1-crpix))*(1+redshift)])
if j > 0:
dpix_c_val.extend([wave_c[j]-wave_c[j-1]])
dpix_c_val[0] = dpix_c_val[1]
#Defining some resolution
dpix_c = wave_c[1] - wave_c[0]
rsigma = sigma/dpix_c #This sigma is a velocity dispersion
print '*Number of kinematic functions (nf)', nf
print '*n_unc (pixel points)', n_unc
#Looping through the kinematic populations (3)
for iii in range(0, nf):
header = "NAME" + str(iii)
#Changing name in header so we can read age and metallicity
name.extend([hdr_c_ini[header]])
name_min = name[iii]
name_min = name_min.replace('spec_ssp_','')
name_min = name_min.replace('.spec','')
name_min = name_min.replace('.dat','')
data = name_min.split('_')
AGE = data[0]
MET = data[1]
#Extracting age
if 'Myr' in AGE:
age = AGE.replace('Myr','')
age = float_(age)/1000.
else:
age = AGE.replace('Gyr','')
age = float_(age)
age_mod.extend([age])
#Extracting metallicity
met=float_(MET.replace('z','0.'))
met_mod.extend([met])
#Reading from header
header = "NORM" + str(iii)
val_ml = float_(hdr_c_ini[header])
if val_ml != 0:
ml.extend([1/val_ml])
else:
ml.extend([1])
#Defining empty kernel
box = int(3 * rsigma) if int(3 * rsigma) < 3 else 3
kernel = np.zeros([1,2*box+1])
norm = 0
flux_c.extend([0])
        # Fill the kernel with a Gaussian of width rsigma (the norm factor is the sum of the Gaussian)
for j in range(0, 2*box+1):
gaus = np.exp(-0.5*(((j-box)/rsigma)**2))
kernel[0,j] = gaus
norm = norm+gaus
kernel = kernel/norm
pdl_flux_c_conv = convolve2d(pdl_flux_c_ini, kernel, mode='same')
pdl_flux_c = pdl_flux_c_conv[iii,:]
out_spec_pdl = interp1d(wave_c, pdl_flux_c,bounds_error=False, fill_value=0.)(wave_unc)
n_c_out = out_spec_pdl.shape
error = []
        # Fill the model spectrum matrix (one column per template)
for i in range(0, n_unc):
val = 0 if np.isnan(out_spec_pdl[i]) else out_spec_pdl[i]
model[i][iii] = val * masked[i]
model_no_mask[i][iii] = val
            # Build the error vector
if masked[i] > 0:
error.extend([0.01 * abs(e_flux_unc[i])])
else:
error.extend([0])
    # Adjust the dust attenuation
pdl_model = np.zeros([n_unc,nf])
pdl_model_no_mask = np.zeros([n_unc,nf])
pdl_error = np.zeros(n_unc)
pdl_masked = masked
pdl_dust_spec = np.zeros([n_unc,nf])
for j in range(0, nf):
for i in range(0, n_unc):
wave_res = wave_unc[i]/(1+redshift)
dust_rat = my.A_l(3.1,wave_res)
dust = 10**(-0.4*Av[j]*dust_rat)
pdl_dust_spec[i][j] = dust
val = model[i][j]*dust
pdl_model[i][j] = val
val_no_mask = model_no_mask[i][j] * dust
pdl_model_no_mask[i][j] = val_no_mask
e_val = error[i]
val_now = 1 if e_flux_unc[i] == 0 else e_flux_unc[i]
pdl_error[i] = 1.0/(abs(val_now)**2)
#---- Starting the linear fitting
pdl_flux_masked = flux_masked
ini_cat = 0
pdl_C_flast = np.ones(nf)
#Initial stellar synthesis without restrictions!
y_model_now, coeffs = linfit1d(pdl_flux_masked, pdl_model, 1/pdl_error)
y_model_now = y_model_now[:,0]
    # Count the population coefficients that are positive and negative
nf_new, nf_neg = 0, 0
for k in range(0, nf):
C = coeffs[k][0]
if C > 0:
nf_new = nf_new + 1
else:
nf_neg = nf_neg + 1
MOD = []
if nf_new > 0 :
        # Repeat the fit until there are no negative coefficients
while nf_neg > 0:
pdl_model_new = np.zeros([n_unc,nf_new])
nf_i = 0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
pdl_model_new[:, nf_i] = pdl_model[:, k]
MOD.extend([k])
nf_i = nf_i+1
else:
coeffs[k][0] = 0
#New fit
yfit, coeffs_new = linfit1d(pdl_flux_masked, pdl_model_new, 1/pdl_error)
y_model_now = yfit[:,0]
nf_i = 0
nf_neg = 0
nf_new = 0
            print 'Starting the operation:'
for k in range(0, nf):
print '-k:',k
C = coeffs[k][0]
if C > 0:
val = coeffs_new[nf_i][0]
nf_i = nf_i+1
if val > 0:
coeffs[k][0] = val
nf_new = nf_new+1
else:
coeffs[k][0]=0
nf_neg=nf_neg+1
if nf_new == 0:
nf_neg=0
else:
nf_new=nf
#---- Generate output parameters
chi = 0
chi2 = 0
NFREE = 0
out_spec = []
chi_sec = []
res_spec = []
model_spec_min = []
model_spec = []
    # Chi-square parameters
for j in range(0, n_unc):
out_spec.extend([y_model_now[j]])
model_spec.extend([out_spec[j]])
res_spec.extend([flux_unc[j]-model_spec[j]])
model_spec_min.extend([model_spec[j]])
chi_sec.extend([0])
if flux_unc[j] != 0 and out_spec[j] !=0 and e_flux_unc[j] != 0:
chi = chi+masked[j]*((flux_masked[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
chi_sec[j] = masked[j]*((flux_unc[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
NFREE = NFREE + 1
chi_sq = chi
if NFREE > 0:
chi_sq = (chi_sq/float_(NFREE))**0.5
chi_sq_min_now = chi_sq
y_model_end = y_model_now
min_coeffs = coeffs
chi_sq = chi_sq_min_now
norm_C = 0
norm_C_mass = 0
for k in range(0, nf):
dust = 10**(-0.4*Av[k]*dust_rat)
C = coeffs[k][0]
norm_C = norm_C + C
norm_C_mass = norm_C_mass+C*ml[k]
coeffs_N[k][0] = norm_C
coeffs_NM[k][0] = norm_C_mass
pdl_model_spec_min = pdl_model_spec_min + C * pdl_model_no_mask[:,k]
#Summing populations
for k in range(0, nf):
C = coeffs[k][0]
if norm_C > 0:
age_min = age_min+C*np.log10(age_mod[k])/norm_C
met_min = met_min+C*met_mod[k]/norm_C
Av_min = Av_min+C*Av[k]/norm_C
CN = C/norm_C
C_now=C*med_norm
if norm_C_mass > 0:
age_min_mass = age_min_mass+C*np.log10(ml[k]*age_mod[k])/norm_C_mass
met_min_mass = met_min_mass+C*ml[k]*met_mod[k]/norm_C_mass
Av_min_mass = Av_min_mass+C*ml[k]*Av[k]/norm_C_mass
age_min = 10**(age_min)
age_min_mass = 10**(age_min_mass)
#Output arrays
pdl_age_mod = np.array(age_mod)
pdl_met_mod = np.array(met_mod)
pdl_ml = np.array(ml)
pdl_Av = np.array(Av)
#Plot configuration
pdl_res = flux_unc-pdl_model_spec_min
pdl_wave_unc = wave_unc
out_ps_now = "junk"
title = "X="+str(chi_sq)+" Av="+str(Av[0])+" z="+str(redshift)+" sigma="+str(sigma)
# if plot > 0:
# plot_results(plot,pdl_wave_unc,[pdl_flux_masked,pdl_model_spec_min,pdl_res],out_ps_now,title)
return [chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_model_spec_min,pdl_res]
def fit_ssp_lin_no_zero(redshift,sigma,Av_NOW,crval,cdelt,crpix,nf,n_c,pdl_flux_c_ini,hdr_c_ini,wave_unc,masked,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,MIN_CHISQ,plot):
Av=Av_NOW
iter_max=5
last_chi=1e12
n_unc=len(flux_unc)
pdl_model_spec_min=np.zeros(n_unc)
flux_masked=np.zeros(n_unc)
coeffs=np.zeros([nf,3])
coeffs_N=np.zeros([nf,1])
coeffs_NM=np.zeros([nf,1])
coeffs_cat=np.zeros([nf+1,n_mc*n_mc])
pdl_model_spec_cat=np.zeros([n_unc,n_mc*n_mc])
for i in range(0, n_unc):
flux_masked[i]=flux_unc[i]*masked[i]
for i in range(0, nf):
if Av[i] < 0:
Av[i]=0
wave_c=[]
dpix_c_val=[]
for j in range(0, n_c):
wave_c.extend([(crval+cdelt*(j+1-crpix))*(1+redshift)])
if j > 0:
dpix_c_val.extend([wave_c[j]-wave_c[j-1]])
dpix_c_val[0]=dpix_c_val[1]
dpix_c=wave_c[1]-wave_c[0]
rsigma=sigma/dpix_c
print '-----n_unc', n_unc
print '-----masked', len(masked), np.array(masked)
print '-----wave_c', len(wave_c), np.array(wave_c)
print '-----rsigma', rsigma
name=[]
age_mod=[]
met_mod=[]
ml=[]
flux_c=[]
model=np.zeros([n_unc,nf])
model_no_mask=np.zeros([n_unc,nf])
age_min=0
met_min=0
med_norm=1
Av_min=0
age_min_mass=0
met_min_mass=0
Av_min_mass=0
for iii in range(0, nf):
header="NAME"+str(iii)
name.extend([hdr_c_ini[header]]);
name_min=name[iii]
name_min=name_min.replace('spec_ssp_','')
name_min=name_min.replace('.spec','')
name_min=name_min.replace('.dat','')
data=name_min.split('_')
AGE=data[0]
MET=data[1]
if 'Myr' in AGE:
age=AGE.replace('Myr','')
age=float_(age)/1000.
else:
age=AGE.replace('Gyr','')
age=float_(age)
met=float_(MET.replace('z','0.'))
age_mod.extend([age])
met_mod.extend([met])
header="NORM"+str(iii)
val_ml=float_(hdr_c_ini[header])
if val_ml != 0:
ml.extend([1/val_ml])
else:
ml.extend([1])
box=int(3*rsigma)
if box < 3:
box=3
kernel=np.zeros([1,2*box+1])
norm=0
flux_c.extend([0])
for j in range(0, 2*box+1):
gaus=np.exp(-0.5*(((j-box)/rsigma)**2))
kernel[0,j]=gaus
norm=norm+gaus
kernel=kernel/norm;
pdl_flux_c_conv = convolve2d(pdl_flux_c_ini,kernel,mode='same')
pdl_flux_c = pdl_flux_c_conv[iii,:]
out_spec_pdl = interp1d(wave_c, pdl_flux_c,bounds_error=False,fill_value=0.)(wave_unc)
n_c_out=out_spec_pdl.shape
error=[]
for i in range(0,n_unc):
val=out_spec_pdl[i]
if np.isnan(val):
val=0
model[i][iii]=val*masked[i]
model_no_mask[i][iii]=val
if masked[i] > 0:
error.extend([0.01*abs(e_flux_unc[i])])
else:
error.extend([0])
# print 'Hector model', np.sum(np.array(model[:,iii]))
# print 'Hector model_no_mask', np.sum(np.array(model_no_mask[:,iii]))
# print 'Hector error', np.sum(np.array(error))
print '-----model', model.shape, np.sum(model)
pdl_model=np.zeros([n_unc,nf])
pdl_model_no_mask=np.zeros([n_unc,nf])
pdl_error=np.zeros(n_unc)
pdl_masked=masked
pdl_dust_spec=np.zeros([n_unc,nf])
for j in range(0, nf):
for i in range(0, n_unc):
wave_res=wave_unc[i]/(1+redshift)
dust_rat=my.A_l(3.1,wave_res)
dust=10**(-0.4*Av[j]*dust_rat)
pdl_dust_spec[i][j]=dust
val=model[i][j]*dust
pdl_model[i][j]=val
val_no_mask=model_no_mask[i][j]*dust
pdl_model_no_mask[i][j]=val_no_mask
e_val=error[i]
val_now=e_flux_unc[i]
if val_now == 0:
val_now=1
pdl_error[i]=1.0/(abs(val_now)**2)
print '-----pdl_model', np.sum(pdl_model)
#
# We fit
#
pdl_flux_masked=flux_masked
ini_cat=0
pdl_C_flast=np.ones(nf)
# Just a linear FIT, without restrictions!
# print 'Inputs linfit1d'
# print np.sum(flux_unc)
# print np.sum(pdl_flux_masked)
# print np.sum(pdl_model)
# print np.sum(1/pdl_error)
[y_model_now, coeffs] = linfit1d(pdl_flux_masked,pdl_model,1/pdl_error)
y_model_now=y_model_now[:,0]
# print '-----y_model_now', y_model_now, np.sum(y_model_now), y_model_now.shape
# print '-----coeffs', coeffs
#
# We remove the models that are negative
#
nf_new=0
nf_neg=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
nf_new=nf_new+1
else:
nf_neg=nf_neg+1
print '----nf_new:', nf_new, (coeffs[:,0] > 0).sum()
print '----nf_neg:', nf_neg, (coeffs[:,0] < 0).sum()
print '----n_unc:', n_unc
print '----n_coeffs:', coeffs.shape
print '----nf:', nf
MOD=[]
if nf_new > 0 :
while nf_neg > 0:
pdl_model_new=np.zeros([n_unc,nf_new])
nf_i=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
pdl_model_new[:,nf_i] = pdl_model[:,k]
MOD.extend([k])
nf_i=nf_i+1
else:
coeffs[k][0]=0
[yfit, coeffs_new] = linfit1d(pdl_flux_masked,pdl_model_new,1/pdl_error)
y_model_now=yfit[:,0]
nf_i=0
nf_neg=0
nf_new=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
val=coeffs_new[nf_i][0]
nf_i=nf_i+1
if val > 0:
coeffs[k][0]=val
nf_new=nf_new+1
else:
coeffs[k][0]=0
nf_neg=nf_neg+1
if nf_new == 0:
nf_neg=0
#for k in range(0, nf):
# C=coeffs[k][0]
else:
nf_new=nf
    # print 'Afterwards'
# print coeffs
##############################
# CHISQ VALUE
chi=0
chi2=0
NFREE=0
out_spec=[]
chi_sec=[]
res_spec=[]
model_spec_min=[]
model_spec=[]
for j in range(0, n_unc):
out_spec.extend([y_model_now[j]])
model_spec.extend([out_spec[j]])
res_spec.extend([flux_unc[j]-model_spec[j]])
model_spec_min.extend([model_spec[j]])
chi_sec.extend([0])
if flux_unc[j] != 0 and out_spec[j] !=0 and e_flux_unc[j] != 0:
chi=chi+masked[j]*((flux_masked[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
chi_sec[j]=masked[j]*((flux_unc[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
NFREE=NFREE+1
chi_sq=chi
if NFREE > 0:
chi_sq=(chi_sq/float_(NFREE))**0.5
chi_sq_min_now=chi_sq
y_model_end=y_model_now
min_coeffs=coeffs
chi_sq=chi_sq_min_now
norm_C=0
norm_C_mass=0
for k in range(0, nf):
dust=10**(-0.4*Av[k]*dust_rat)
C=coeffs[k][0]
norm_C=norm_C+C
norm_C_mass=norm_C_mass+C*ml[k]
coeffs_N[k][0]=norm_C
coeffs_NM[k][0]=norm_C_mass
pdl_model_spec_min=pdl_model_spec_min+C*pdl_model_no_mask[:,k]
print '-----pdl_model_spec_min', np.sum(pdl_model_spec_min)
for k in range(0, nf):
C=coeffs[k][0]
if norm_C > 0:
age_min=age_min+C*np.log10(age_mod[k])/norm_C
met_min=met_min+C*met_mod[k]/norm_C
Av_min=Av_min+C*Av[k]/norm_C
CN=C/norm_C
C_now=C*med_norm
if norm_C_mass > 0:
age_min_mass=age_min_mass+C*np.log10(ml[k]*age_mod[k])/norm_C_mass
met_min_mass=met_min_mass+C*ml[k]*met_mod[k]/norm_C_mass
Av_min_mass=Av_min_mass+C*ml[k]*Av[k]/norm_C_mass
age_min=10**(age_min)
age_min_mass=10**(age_min_mass)
pdl_age_mod=np.array(age_mod)
pdl_met_mod=np.array(met_mod)
pdl_ml=np.array(ml)
pdl_Av=np.array(Av)
pdl_res=flux_unc-pdl_model_spec_min
pdl_wave_unc=wave_unc
out_ps_now="junk";
title="X="+str(chi_sq)+" Av="+str(Av[0])+" z="+str(redshift)+" sigma="+str(sigma)
# if plot > 0:
# plot_results(plot,pdl_wave_unc,[pdl_flux_masked,pdl_model_spec_min,pdl_res],out_ps_now,title)
return [chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_model_spec_min,pdl_res], [wave_unc, masked, pdl_model_spec_min]
def fit_ssp_lin_no_zero_no_cont(redshift,sigma,Av_NOW,crval,cdelt,crpix,nf,n_c,pdl_flux_c_ini,hdr_c_ini,wave_unc,masked,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,MIN_CHISQ,plot):
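    # Variant of the linear SSP fit above: the best-fit model is additionally
    # rescaled by a smoothed flux/model ratio (smooth_ratio), so that large-scale
    # continuum mismatches are absorbed before the chi-square is evaluated.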
Av=Av_NOW
iter_max=5;
last_chi=1e12
n_unc=len(flux_unc)
pdl_model_spec_min=np.zeros(n_unc)
coeffs=np.zeros([nf,3])
coeffs_N=np.zeros([nf,1])
coeffs_NM=np.zeros([nf,1])
coeffs_cat=np.zeros([nf+1,n_mc*n_mc])
pdl_model_spec_cat=np.zeros([n_unc,n_mc*n_mc])
flux_masked=np.zeros(n_unc)
for i in range(0, n_unc):
flux_masked[i]=flux_unc[i]*masked[i]
for i in range(0, nf):
if Av[i] < 0:
Av[i]=0
wave_c=np.zeros(n_c)
dpix_c_val=np.zeros(n_c)
for j in range(0, n_c):
wave_c[j]=(crval+cdelt*(j+1-crpix))*(1+redshift)
if j > 0:
dpix_c_val[j]=wave_c[j]-wave_c[j-1]
dpix_c_val[0]=dpix_c_val[1]
dpix_c=wave_c[1]-wave_c[0]
rsigma=sigma/dpix_c
name=[]
age_mod=[]
met_mod=[]
ml=[]
flux_c=[]
model=np.zeros([n_unc,nf])
model_no_mask=np.zeros([n_unc,nf])
age_min=0
met_min=0
Av_min=0
age_min_mass=0
met_min_mass=0
Av_min_mass=0
med_norm=1
for iii in range(0, nf):
header="NAME"+str(iii)
name.extend([hdr_c_ini[header]]);
name_min=name[iii]
name_min=name_min.replace('spec_ssp_','')
name_min=name_min.replace('.spec','')
name_min=name_min.replace('.dat','')
data=name_min.split('_')
AGE=data[0]
MET=data[1]
if 'Myr' in AGE:
age=AGE.replace('Myr','')
age=float_(age)/1000.
else:
age=AGE.replace('Gyr','')
age=float_(age)
met=float_(MET.replace('z','0.'))
age_mod.extend([age])
met_mod.extend([met])
header="NORM"+str(iii)
val_ml=float_(hdr_c_ini[header])
if val_ml != 0:
ml.extend([1/val_ml])
else:
ml.extend([1])
box=int(3*rsigma)
if box < 3:
box=3
kernel=np.zeros([1,2*box+1])
norm=0
flux_c.extend([0])
for j in range(0, 2*box+1):
gaus=np.exp(-0.5*(((j-box)/rsigma)**2))
kernel[0,j]=gaus
norm=norm+gaus
kernel=kernel/norm;
pdl_flux_c_conv = convolve2d(pdl_flux_c_ini,kernel,mode='same')
pdl_flux_c = pdl_flux_c_conv[iii,:]
out_spec_pdl = interp1d(wave_c, pdl_flux_c,bounds_error=False,fill_value=0.)(wave_unc)
n_c_out=out_spec_pdl.shape
error=[]
for i in range(0,n_unc):
val=out_spec_pdl[i]
if np.isnan(val):
val=0
model[i][iii]=val*masked[i]
model_no_mask[i][iii]=val
if masked[i] > 0:
error.extend([0.01*abs(e_flux_unc[i])])
else:
error.extend([0])
pdl_model=np.zeros([n_unc,nf])
pdl_model_no_mask=np.zeros([n_unc,nf])
pdl_error=np.zeros(n_unc)
pdl_masked=masked
pdl_dust_spec=np.zeros([n_unc,nf])
for j in range(0, nf):
for i in range(0, n_unc):
wave_res=wave_unc[i]/(1+redshift)
dust_rat=my.A_l(3.1,wave_res)
dust=10**(-0.4*Av[j]*dust_rat)
pdl_dust_spec[i][j]=dust
val=model[i][j]*dust
pdl_model[i][j]=val
val_no_mask=model_no_mask[i][j]*dust
pdl_model_no_mask[i][j]=val_no_mask
e_val=error[i]
val_now=e_flux_unc[i]
if val_now == 0:
val_now=1
pdl_error[i]=1.0/(abs(val_now)**2)
#
# We fit
#
pdl_flux_masked=flux_masked
ini_cat=0
pdl_C_flast=np.ones(nf)
# Just a linear FIT, without restrictions!
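    # After the unconstrained linear solve, the model is multiplied by
    # smooth_ratio(flux, model, sigma): a smoothed observed-to-model flux ratio
    # acting as a low-order multiplicative continuum correction.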
[y_model_now, coeffs] = linfit1d(pdl_flux_masked,pdl_model,1/pdl_error)
smooth_rati=smooth_ratio(flux_unc,y_model_now[:,0],int(sigma))
y_model_now=y_model_now[:,0]*smooth_rati
#
# We remove the models that are negative
#
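    # Same non-negative pruning scheme as in the previous routine, but the
    # smooth_ratio correction is re-applied after every refit.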
nf_new=0
nf_neg=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
nf_new=nf_new+1
else:
nf_neg=nf_neg+1
MOD=[]
if nf_new > 0 :
while nf_neg > 0:
pdl_model_new=np.zeros([n_unc,nf_new])
nf_i=0;
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
pdl_model_new[:,nf_i] = pdl_model[:,k]
MOD.extend([k])
nf_i=nf_i+1
else:
coeffs[k][0]=0
[yfit, coeffs_new] = linfit1d(pdl_flux_masked,pdl_model_new,1/pdl_error)
smooth_rati=smooth_ratio(flux_unc,yfit[:,0],int(sigma))
yfit=yfit[:,0]*smooth_rati
y_model_now=yfit
nf_i=0
nf_neg=0
nf_new=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
val=coeffs_new[nf_i][0]
nf_i=nf_i+1
if val > 0:
coeffs[k][0]=val
nf_new=nf_new+1
else:
coeffs[k][0]=0
nf_neg=nf_neg+1
if nf_new == 0:
nf_neg=0
#for k in range(0, nf):
# C=coeffs[k][0]
else:
nf_new=nf
#sys.exit()
##############################
# CHISQ VALUE
chi=0
chi2=0
NFREE=0
out_spec=[]
chi_sec=[]
res_spec=[]
model_spec_min=[]
model_spec=[]
for j in range(0, n_unc):
out_spec.extend([y_model_now[j]])
model_spec.extend([out_spec[j]])
res_spec.extend([flux_unc[j]-model_spec[j]])
model_spec_min.extend([model_spec[j]])
chi_sec.extend([0])
if flux_unc[j] != 0 and out_spec[j] !=0 and e_flux_unc[j] != 0:
chi=chi+masked[j]*((flux_masked[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
chi_sec[j]=masked[j]*((flux_unc[j]-out_spec[j])**2.)/(e_flux_unc[j])**2.
NFREE=NFREE+1
chi_sq=chi
if NFREE > 0:
chi_sq=(chi_sq/float_(NFREE))**0.5
chi_sq_min_now=chi_sq
y_model_end=y_model_now
min_coeffs=coeffs
chi_sq=chi_sq_min_now
norm_C=0
norm_C_mass=0
for k in range(0, nf):
dust=10**(-0.4*Av[k]*dust_rat)
C=coeffs[k][0]
norm_C=norm_C+C
norm_C_mass=norm_C_mass+C*ml[k]
coeffs_N[k][0]=norm_C
coeffs_NM[k][0]=norm_C_mass
pdl_model_spec_min=pdl_model_spec_min+C*pdl_model_no_mask[:,k]
smooth_rati=smooth_ratio(flux_unc,pdl_model_spec_min,int(sigma))
pdl_model_spec_min=pdl_model_spec_min*smooth_rati
for k in range(0, nf):
C=coeffs[k][0]
if norm_C > 0:
age_min=age_min+C*np.log10(age_mod[k])/norm_C
met_min=met_min+C*met_mod[k]/norm_C
Av_min=Av_min+C*Av[k]/norm_C
CN=C/norm_C
C_now=C*med_norm
if norm_C_mass > 0:
age_min_mass=age_min_mass+C*np.log10(ml[k]*age_mod[k])/norm_C_mass
met_min_mass=met_min_mass+C*ml[k]*met_mod[k]/norm_C_mass
Av_min_mass=Av_min_mass+C*ml[k]*Av[k]/norm_C_mass
age_min=10**(age_min)
age_min_mass=10**(age_min_mass)
pdl_age_mod=np.array(age_mod)
pdl_met_mod=np.array(met_mod)
pdl_ml=np.array(ml)
pdl_Av=np.array(Av)
pdl_res=flux_unc-pdl_model_spec_min
pdl_wave_unc=wave_unc
out_ps_now="junk";
title="X="+str(chi_sq)+" Av="+str(Av[0])+" z="+str(redshift)+" sigma="+str(sigma)
if plot > 0:
# plot_results(plot,pdl_wave_unc,[pdl_flux_masked,pdl_model_spec_min,pdl_res],out_ps_now,title)
plot_results(plot,pdl_wave_unc,[pdl_flux_masked,y_model_end,pdl_res],out_ps_now,title)
#sys.exit()
return [chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_model_spec_min,pdl_res]
def copy_a(n_mod,a_print):
a_copy=np.zeros([n_mod, 9])
for ii in range(0, n_mod):
for j in range(0, 9):
a_copy[ii][j]=a_print[ii][j];
return a_copy
def copy_a_results(n_mod,kk,a_print,a_copy):
if kk > 0:
a_copy=np.concatenate((a_copy,np.zeros([1, n_mod, 9])),axis=0)
for ii in range(0, n_mod):
for j in range(0, 9):
a_copy[kk][ii][j]=a_print[ii][j]
return a_copy
def create_single_model(pdl_wave,i_now,typef,a_c):
dims=pdl_wave.shape
nx=dims[0]
pdl_out=np.zeros([1,nx])
for i in range(0, nx):
w=pdl_wave[i]
if typef[i_now] == "eline\n":
speed_of_light=299792.458
factor=(1+a_c[i_now][3]/speed_of_light+a_c[i_now][5])
e1=1
Y1=1
if a_c[i_now][2] != 0:
e1=np.exp(-0.5*((w-a_c[i_now][0]*factor)/a_c[i_now][2])**2)
Y1=a_c[i_now][1]*e1/(a_c[i_now][2]*((2*3.1416)**0.5))
pdl_out[0][i]=Y1
if typef[i_now] == "poly1d\n":
Yi=0
for ii in range(0, 9):
Yi=Yi+a_c[i_now][ii]*(w)**(ii)
pdl_out[0][i]=Yi
return pdl_out
def create_single_model_one(pdl_wave,i_now,typef,a_c):
dims=pdl_wave.shape
nx=dims[0]
pdl_out=np.zeros([1,nx])
for i in range(0, nx):
w=pdl_wave[i]
if typef[i_now] == "eline\n":
speed_of_light=299792.458
factor=(1+a_c[i_now][3]/speed_of_light+a_c[i_now][5])
e1=1
Y1=1
if a_c[i_now][2] != 0:
e1=np.exp(-0.5*((w-a_c[i_now][0]*factor)/a_c[i_now][2])**2)
Y1=1.0*e1/(a_c[i_now][2]*((2*3.1416)**0.5))
pdl_out[0][i]=Y1
if typef[i_now] == "poly1d\n":
Yi=0
for ii in range(0, 9):
Yi=Yi+a_c[i_now][ii]*(w)**(ii)
pdl_out[0][i]=Yi
return pdl_out
def create_single_model_poly(pdl_wave,ii):
dims=pdl_wave.shape
nx=dims[0]
pdl_out=np.zeros([1,nx])
for i in range(0, nx):
w=pdl_wave[i]
Yi=(w)**(ii)
pdl_out[0][i]=Yi
return pdl_out
def fit_elines_grad_rnd_new(pdl_wave,pdl_flux,pdl_e_flux,n_mod,chi_goal,d_chi_goal,typef,a,ia,a0,a1,link,n_rnd,pdl_masked,defi,SCALE_INI):
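    # Random-search fit of a set of emission-line ("eline") plus polynomial
    # ("poly1d") components.  The search runs in phases: (1) randomize the
    # velocity parameter (index 3) to bracket the redshift, (2) randomize the
    # line width (index 2), and (3) a general refinement loop with a shrinking
    # step SCALE (disabled here, since new_mc is set to 0 before it starts).
    # At every trial the line amplitudes are solved linearly with linfit1d and
    # the parameter set with the lowest reduced chi-square is kept in a_out.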
ttt=1
a_out=np.zeros([n_mod, 9])
SCALE=SCALE_INI
n_mc=n_rnd
cont=0
dims=pdl_flux.shape
if len(dims) == 1:
nx=dims[0]
# First Guess
n_points=np.sum(pdl_masked)
pdl_model_0=np.zeros(nx)
pdl_model_cont_0=np.zeros(nx)
for i in range(0,n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a)
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=3*(chi_sq/(n_points-n_mod-1))**0.5
chi_sq_ini=chi_sq
a_out=copy_a(n_mod,a)
a_now=np.zeros([n_mod, 9])
pdl_rnd=ran.randn(n_mod*9*n_mc)
pdl_rnd_lin=ran.rand(n_mod*9*n_mc)
# 1st we derive the redshift!
new_mc=int(n_mc/2)
for ii in range(0, new_mc):
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i)+(9*n_mod*ii))]#*ii]
rnd_lin=pdl_rnd_lin[(j+(9*i)+(9*n_mod*ii))]#*ii]
if ia[i][j]==1:
if link[i][j]==-1:
if typef[i] != "poly1d\n":
if j==3:
a_now[i][j]=a0[i][j]+rnd_lin*(a1[i][j]-a0[i][j])#*ii/new_mc
else:
a_now[i][j]=a_out[i][j]
else:
a_now[i][j]=a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < a0[i][j]:
a_now[i][j] = a0[i][j]
if a_now[i][j] > a1[i][j]:
a_now[i][j] = a1[i][j]
else:
k=link[i][j]-1
method=a1[i][j]
if method==0:
a_now[i][j]=a_now[k][j]+a0[i][j]
else:
a_now[i][j]=a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
n_mod_free=0
for i1 in range(0,n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
if n_mod_free == 0:
pdl_model_n=pdl_tmp
else:
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
else:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
ik=link[i1][1]-1
t = pdl_model_n[ik,:]
pdl_model_n[ik,:]=pdl_tmp*a0[i1][1]+t
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
pdl_tmp=create_single_model_poly(pdl_wave,j)
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
pdl_grandom=ran.randn(nx)
pdl_flux_fit=pdl_flux-cont+pdl_grandom*pdl_e_flux
dim_n=pdl_model_n.shape
if dim_n[0] > 1:
[pdl_model_0, coeffs_0] = linfit1d(pdl_flux_fit,pdl_model_n)
else:
pdl_model_n[np.where(pdl_model_n == 0)[0]]=1.
pdl_rat=pdl_flux_fit/pdl_model_n
pdl_rat[np.isnan(pdl_rat)]=0
stats=np.median(pdl_rat)
coeffs_0=np.zeros(1)+1
pdl_model_0=stats*pdl_model_n
coeffs_0=stats*coeffs_0
pdl_model_0=pdl_model_0+cont
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
a_now[i1][1] = coeffs_0[n_mod_free]
if a_now[i1][1] < a0[i1][1]:
a_now[i1][1]=a0[i1][1]
if a_now[i1][1] > a1[i1][1]:
a_now[i1][1]=a1[i1][1]
n_mod_free=n_mod_free+1
else:
k=link[i1][1]-1
method=a1[i1][1]
if method == 0:
a_now[i1][1]=a_now[k][1]+a0[i1][1]
else:
a_now[i1][1]=a_now[k][1]*a0[i1][1]
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
val=coeffs_0[n_mod_free]
a_now[i1][j]=val
n_mod_free=n_mod_free+1
pdl_model_0=np.zeros(nx)
pdl_model_cont_0=np.zeros(nx)
#pdl_model=pdl_model_0
#pdl_model_cont=pdl_model_cont_0
for i in range(0, n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a_now)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
pdl_a_now=a_now
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq <= chi_sq_ini:
a_out=copy_a(n_mod,a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
ttt=0
ii=0
a_now=copy_a(n_mod,a_out)
chi_sq_ini=1e12;
#print chi_sq, chi_sq_ini
#
# 2nd we derive the sigma
#
new_mc=int(n_mc/3)
# pdl_rnd=ran.randn(n_mod*9*n_mc)
# pdl_rnd_lin=ran.rand(n_mod*9*n_mc)
for ii in range(0, new_mc):
#
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i)+(9*n_mod*ii))]
rnd_lin=pdl_rnd_lin[(j+(9*i)+(9*n_mod*ii))]
if ia[i][j] == 1:
if link[i][j] == -1:
if typef[i] != "poly1d\n":
if j == 2:
a_now[i][j]=a0[i][j]+rnd_lin*(a1[i][j]-a0[i][j])#*ii/new_mc
#print a_now[i][j], a0[i][j], (a1[i][j]-a0[i][j]), chi_sq
else:
a_now[i][j]=a_out[i][j]
else:
a_now[i][j] = a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < a0[i][j]:
a_now[i][j] = a0[i][j]
if a_now[i][j] > a1[i][j]:
a_now[i][j] = a1[i][j]
else:
k=link[i][j]-1
method=a1[i][j]
if method == 0:
a_now[i][j] = a_now[k][j]+a0[i][j]
else:
a_now[i][j] = a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
if n_mod_free == 0:
pdl_model_n=pdl_tmp
else:
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
else:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
i2=link[i1][1]-1
t=pdl_model_n[i2,:]
pdl_model_n[i2,:]=pdl_tmp*a0[i1][1]+t
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
pdl_tmp=create_single_model_poly(pdl_wave,j)
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
pdl_grandom=ran.randn(nx)
pdl_flux_fit=pdl_flux-cont+pdl_grandom*pdl_e_flux
dim_n=pdl_model_n.shape
if dim_n[0] > 1:
[pdl_model_0, coeffs_0] = linfit1d(pdl_flux_fit,pdl_model_n)
else:
pdl_model_n[np.where(pdl_model_n == 0)[0]]=1.
pdl_rat=pdl_flux_fit/pdl_model_n
pdl_rat[np.isnan(pdl_rat)]=0
stats=np.median(pdl_rat)
coeffs_0=np.zeros(1)+1
pdl_model_0=stats*pdl_model_n
coeffs_0=stats*coeffs_0
pdl_model_0=pdl_model_0+cont
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
a_now[i1][1]=coeffs_0[n_mod_free]
n_mod_free=n_mod_free+1
else:
k=link[i1][1]-1
method=a1[i1][1]
if method == 0:
a_now[i1][1]=a_now[k][1]+a0[i1][1]
else:
a_now[i1][1]=a_now[k][1]*a0[i1][1]
for j in range(0, 9):
if ia[i1][j] == 1 and link[i1][j] == -1:
if a_now[i1][j] < a0[i1][j]:
a_now[i1][j]=a0[i1][j]
if a_now[i1][j] > a1[i1][j]:
a_now[i1][j]=a1[i1][j]
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
val=coeffs_0[n_mod_free]
a_now[i1][j]=val
n_mod_free=n_mod_free+1
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
pdl_a_now=a_now
#print pdl_model_cont_0.shape
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0[:,0])**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq <= chi_sq_ini:
a_out=copy_a(n_mod,a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
ttt=0
#print a_out[0][1],chi_sq
a_now=copy_a(n_mod,a_out)
# We start the fitting loop!
ii=0
SCALE_IN=SCALE
new_mc=0
while ii < new_mc:
#
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i))*ii]
rnd_lin=pdl_rnd_lin[(j+(9*i))*ii]
if ia[i][j] == 1:
if link[i][j] == -1:
A1=a1[i][j]
A0=a0[i][j]
if a1[i][j] > 1.3*a_out[i][j]:
A1=1.3*a_out[i][j]
if a0[i][j] < 0.7*a_out[i][j]:
A0=0.7*a_out[i][j]
if typef[i] == "eline\n":
if j == 3:
a_now[i][j]=a_out[i][j]+SCALE*rnd*(A1-A0)/(5*new_mc)
else:
a_now[i][j]=a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < A0:
a_now[i][j]=A0
if a_now[i][j] > A1:
a_now[i][j]=A1
else:
a_now[i][j]=a_out[i][j]
else:
k=link[i][j]-1
method=a1[i][j]
if method == 0:
a_now[i][j]=a_now[k][j]+a0[i][j]
else:
a_now[i][j]=a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
# MC test
pdl_model_0=np.zeros(nx)
pdl_model_cont_0=np.zeros(nx)
for i in range(0, n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a_now)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq < chi_sq_ini:
a_out=copy_a(n_mod, a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
SCALE=SCALE*0.99
ttt=0
if SCALE < 0.1*SCALE_IN:
SCALE=SCALE_IN*0.1
else:
SCALE=SCALE_IN
if (abs(chi_sq-chi_sq_ini) < d_chi_goal) or (chi_sq_ini < chi_goal):
ii=n_mc
ii=ii+1
chi_sq_now=chi_sq_ini
if ttt == 1:
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
#print ttt, "HOLA TEST!!!!!!!"
#
    # We force linear!
#
#a_out=copy_a(n_mod,a_now_lin)
pdl_a_out=a_out
return [chi_sq_now,pdl_a_out,pdl_model,pdl_model_cont]
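# Illustrative call (hypothetical variable names and values; the configuration
# arrays typef, a, ia, a0, a1 and link are assumed to be built by the caller
# from an emission-line configuration file):
#   chi_sq, a_fit, model, cont = fit_elines_grad_rnd_new(wave, flux, e_flux,
#       n_mod, 0.5, 0.01, typef, a, ia, a0, a1, link, 50, mask, 0, 0.15)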
def copy_a_pdl(n_mod,a_print):
a_copy=np.zeros([n_mod, 9])
for ii in range(0, n_mod):
for j in range(0, 9):
a_copy[ii][j]=a_print[ii][j]
return a_copy
def print_a_final(n_mod,a_print,typef,chi_a,back_noise=[1],si=0):
nx=len(back_noise)
if nx == 1:
back_noise=0
print str(n_mod)+" "+str(chi_a)
line=""
for ii in range(0, n_mod):
if typef[ii] == "eline\n":
typefs="eline"
else:
typefs="poly1d"
line=line+typefs+" "
e_F=back_noise*2.354*a_print[0][ii][2]
a_print[1][ii][1]=np.sqrt((a_print[1][ii][1])**2+e_F**2)
for j in range(0, 9):
line=line+str(a_print[0][ii][j])+" "+str(a_print[1][ii][j])+" "
if si == 0:
print line
line=""
return a_print
def print_a_final_file(n_mod,a_print,typef,chi_a,outfile):
print "OUT_FILE = "+outfile
f=open(outfile, "w")
f.write(str(n_mod)+" "+str(chi_a)+"\n")
line=""
for ii in range(0, n_mod):
if typef[ii] == "eline\n":
typefs="eline"
else:
typefs="poly1d"
line=line+typefs+" "
for j in range(0, 9):
line=line+str(a_print[0][ii][j])+" "+str(a_print[1][ii][j])+" "
f.write(line+"\n")
line=""
    f.close()
def print_a_final_file_add(n_mod,a_print,typef,chi_a,outfile,i_val=0,sile=0):
if sile == 0:
print "OUT_FILE = "+outfile
f=open(outfile, "a")
f.write(str(n_mod)+" "+str(chi_a)+" "+str(i_val)+"\n")
line=""
for ii in range(0, n_mod):
if typef[ii] == "eline\n":
typefs="eline"
else:
typefs="poly1d"
line=line+typefs+" "
for j in range(0, 9):
line=line+str(a_print[0][ii][j])+" "+str(a_print[1][ii][j])+" "
f.write(line+"\n")
line=""
    f.close()
def print_a_final_file_add_mpi(n_mod,a_print,typef,chi_a,outfile,i_val=0):
#print "OUT_FILE = "+outfile
#f=open(outfile, "a")
#f.write(str(n_mod)+" "+str(chi_a)+" "+str(i_val)+"\n")
warr=[]
warr.extend([str(n_mod)+" "+str(chi_a)+" "+str(i_val)+"\n"])
line=""
for ii in range(0, n_mod):
if typef[ii] == "eline\n":
typefs="eline"
else:
typefs="poly1d"
line=line+typefs+" "
for j in range(0, 9):
line=line+str(a_print[0][ii][j])+" "+str(a_print[1][ii][j])+" "
#f.write(line+"\n")
warr.extend([line+"\n"])
line=""
#f.close
return warr
def add_back_noise(n_mod,a_print,typef,chi_a,back_noise):
for ii in range(0, n_mod):
e_F=back_noise*2.354*a_print[0][ii][2]
a_print[1][ii][1]=np.sqrt((a_print[1][ii][1])**2+e_F**2)
return a_print
def mean_a_results(n_mod,nk,a_print,ia):
a_copy=np.zeros([2,n_mod,9])
for ii in range(0, n_mod):
for j in range(0, 9):
a_tmp=np.zeros(nk)
for k in range(0, nk):
a_tmp[k]=a_print[k][ii][j]
val=np.mean(a_tmp)
e_val=np.std(a_tmp)
a_copy[0][ii][j]=val
a_copy[1][ii][j]=e_val*ia[ii][j]
return a_copy
def mean_a_results_last(n_mod,nk,a_print,ia):
if nk == 0:
nk=1
a_copy=np.zeros([2,n_mod,9])
for ii in range(0, n_mod):
for j in range(0, 9):
a_tmp=np.zeros(nk)
for k in range(0, nk):
a_tmp[k]=a_print[k][ii][j]
val=np.mean(a_tmp)
e_val=np.std(a_tmp)
if e_val == 0:
e_val=0.1*val
a_copy[0][ii][j]=a_tmp[nk-1]
a_copy[1][ii][j]=e_val*ia[ii][j]
return a_copy
def add_a_results_elines(n_mod,a_final,a_type,n_mod_fixed,a_final_fixed,a_type_fixed):
KK=0
for ii in range(0, n_mod):
if a_type[ii] == "eline\n":
if n_mod_fixed+KK > 0:
a_final_fixed=np.vstack((a_final_fixed,np.zeros(9)))
a_type_fixed.extend([a_type[ii]])
for j in range(0, 9):
a_final_fixed[KK+n_mod_fixed][j]=a_final[0][ii][j]
KK=KK+1
n_mod_fixed=n_mod_fixed+KK
return [n_mod_fixed,a_final_fixed,a_type_fixed]
def fit_ssp_lin_MC(redshift,sigma,Av_NOW,crval,cdelt,crpix,nf,n_c,pdl_flux_c_ini,hdr_c_ini,wave_unc,masked,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,MIN_CHISQ,plot):
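    # Monte Carlo version of the SSP fit: a first linear guess of the coefficients
    # (and their rms) is built from n_mc noisy realizations of the data, then the
    # coefficient vector is perturbed repeatedly.  Successively better models are
    # stored, and the final model and coefficients are 1/chi-weighted averages of
    # the stored solutions lying within 1.1x of the best chi-square.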
Av=Av_NOW;
iter_max=5
last_chi=1e12
flux_unc=np.array(flux_unc)
flux_unc[np.isnan(flux_unc)]=0
flux_unc[np.isinf(flux_unc)]=0
n_unc=len(flux_unc)
coeffs_N=np.zeros([nf,1])
coeffs_NM=np.zeros([nf,1])
coeffs_cat=np.zeros([nf+1,n_mc])
pdl_model_spec_cat=np.zeros([n_unc,n_mc])
pdl_1st_model=[]
flux_masked=np.zeros(n_unc)
for i in range(0, n_unc):
flux_masked[i]=flux_unc[i]*masked[i]
for i in range(0, nf):
if Av[i] < 0:
Av[i]=0
wave_c=np.zeros(n_c)
dpix_c_val=[]
for j in range(0, n_c):
wave_c[j]=(crval+cdelt*(j+1-crpix))*(1+redshift)
if j > 0:
dpix_c_val.extend([wave_c[j]-wave_c[j-1]])
dpix_c_val[0]=dpix_c_val[1]
dpix_c=wave_c[1]-wave_c[0]
rsigma=sigma/dpix_c
name=[]
age_mod=[]
met_mod=[]
ml=[]
flux_c=[]
model=np.zeros([n_unc,nf])
model_no_mask=np.zeros([n_unc,nf])
MED_NORM=[]
age_min=0
met_min=0
Av_min=0
age_min_mass=0
met_min_mass=0
Av_min_mass=0
for iii in range(0, nf):
header="NAME"+str(iii)
name.extend([hdr_c_ini[header]]);
name_min=name[iii]
name_min=name_min.replace('spec_ssp_','')
name_min=name_min.replace('.spec','')
name_min=name_min.replace('.dat','')
data=name_min.split('_')
AGE=data[0]
MET=data[1]
if 'Myr' in AGE:
age=AGE.replace('Myr','')
age=float_(age)/1000.
else:
age=AGE.replace('Gyr','')
age=float_(age)
met=float_(MET.replace('z','0.'))
age_mod.extend([age])
met_mod.extend([met])
header="NORM"+str(iii)
val_ml=float_(hdr_c_ini[header])
if val_ml != 0:
ml.extend([1/val_ml])
else:
ml.extend([1])
box=int(3*rsigma)
if box < 3:
box=3
kernel=np.zeros([1,2*box+1])
norm=0
flux_c.extend([0])
for j in range(0, 2*box+1):
gaus=np.exp(-0.5*(((j-box)/rsigma)**2))
kernel[0,j]=gaus
norm=norm+gaus
kernel=kernel/norm;
pdl_flux_c_conv = convolve2d(pdl_flux_c_ini,kernel,mode='same')
pdl_flux_c = pdl_flux_c_conv[iii,:]
out_spec_pdl = interp1d(wave_c, pdl_flux_c,bounds_error=False,fill_value=0.)(wave_unc)
n_c_out=out_spec_pdl.shape
error=[]
for i in range(0,n_unc):
val=out_spec_pdl[i]
if np.isnan(val):
val=0
model[i][iii]=val*masked[i]
model_no_mask[i][iii]=val
if masked[i] > 0:
error.extend([0.01*abs(e_flux_unc[i])])
else:
error.extend([0])
pdl_C_input=np.zeros(nf)
pdl_model=np.zeros([n_unc,nf])
pdl_model_good=np.zeros([n_unc,nf])
pdl_model_no_mask=np.zeros([n_unc,nf])
pdl_error=np.zeros(n_unc)
pdl_masked=masked
pdl_dust=np.zeros([n_unc,nf])
for j in range(0, nf):
for i in range(0, n_unc):
wave_res=wave_unc[i]/(1+redshift)
dust_rat=my.A_l(3.1,wave_res)
dust=10**(-0.4*Av[j]*dust_rat)
pdl_dust[i][j]=dust
val=model[i][j]*dust
pdl_model[i][j]=val
val_no_mask=model_no_mask[i][j]*dust
pdl_model_no_mask[i][j]=val_no_mask
e_val=error[i]
val_now=e_flux_unc[i]
if val_now == 0:
val_now=1
pdl_error[i]=1.0/(abs(val_now)**2)
pdl_flux_masked=flux_masked
#######################################################
# LINEAR GUESS
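    # Linear guess: the masked flux is perturbed n_mc times with Gaussian noise
    # scaled by the per-pixel error, each realization is fit linearly (with the
    # same non-negative pruning as above), and the resulting coefficient sets are
    # accumulated in coeffs_iter.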
j_iter=0
n_iter=n_mc
coeffs_iter=np.zeros([nf,n_iter])
for j_iter in range(0, n_iter):
pdl_gr=ran.randn(n_unc)
# pdl_gr->inplace->clip(-1,1);
pdl_noise=np.sqrt(1/pdl_error)*pdl_gr
pdl_flux_to_fit=pdl_flux_masked+pdl_noise
[y_model_now, coeffs] = linfit1d(pdl_flux_to_fit,pdl_model,1/pdl_error)
pdl_1st_model=y_model_now
#
# We remove the models that are negative
#
nf_new=0
nf_neg=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
nf_new=nf_new+1
else:
nf_neg=nf_neg+1
MOD=[]
if nf_new > 0:
while nf_neg > 0:
pdl_model_new=np.zeros([n_unc,nf_new])
nf_i=0;
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
pdl_model_new[:,nf_i] = pdl_model[:,k]
MOD.extend([k])
nf_i=nf_i+1
else:
coeffs[k][0]=0
[yfit, coeffs_new] = linfit1d(pdl_flux_to_fit,pdl_model_new,1/pdl_error)
pdl_1st_model=yfit
y_model_now=yfit
nf_i=0
nf_neg=0
nf_new=0
for k in range(0, nf):
C=coeffs[k][0]
if C > 0:
val=coeffs_new[nf_i][0]
nf_i=nf_i+1
if val > 0:
coeffs[k][0]=val
nf_new=nf_new+1
else:
coeffs[k][0]=0
nf_neg=nf_neg+1
if nf_new == 0:
nf_neg=0
#for k in range(0, nf):
# C=coeffs[k][0]
else:
nf_new=nf
#for k in range(0, nf):
# C=coeffs[k,0]
coeffs_iter[:,j_iter] = coeffs[:,0]
#print coeffs_iter,j_iter
# End LINEAR GUESS
#######################################################
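    # The summed coefficients are normalized to unity to give the starting
    # coefficient vector (pdl_C_input); the per-template scatter over the noisy
    # realizations (mean + std) is kept in pdl_C_rms as a step size for the
    # Monte Carlo perturbations below.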
pdl_C_input = np.sum(coeffs_iter,axis=1)
#print pdl_C_input
sum_JUNK=np.sum(pdl_C_input)
pdl_C_input=pdl_C_input/sum_JUNK
pdl_C_input_zero=np.copy(pdl_C_input)
coeffs_iter=coeffs_iter/sum_JUNK
pdl_C_rms=np.copy(pdl_C_input)
for j in range(0, nf):
sec=coeffs_iter[j,:]
stats_sec1=np.std(sec)+np.mean(sec)
pdl_C_rms[j]=stats_sec1
min_C=1e12
max_C=0
for j in range(0, nf):
val_C=pdl_C_input_zero[j]
if val_C > 0:
            if val_C < min_C:
                min_C=val_C
            if val_C > max_C:
                max_C=val_C
coeffs=np.zeros([nf,3])
#
# We fit
#
ini_cat=0
pdl_C_flast=np.ones(nf)
fact_q=1
i_mc=0
y_model_end=np.zeros(n_unc)
for j_mc in range(0, n_mc):
C_left=1
if i_mc == 0:
fact_q=0
if i_mc == 1:
fact_q=1
i_mc=i_mc+1
pdl_random=ran.rand(nf)
pdl_grandom=ran.rand(nf)
pdl_random_J=nf*ran.rand(nf)
h_nf=1
jj=0
y_model_now=np.zeros(n_unc)
y_model_no_mask_now=np.zeros(n_unc)
sum_J=0
pdl_C_now=np.copy(pdl_random)
if i_mc > 1:
for j in range(0, nf):
val_random=pdl_random[j]
val_grandom=2*pdl_grandom[j]-1;
C_val_input=pdl_C_input[j]
C_val_zero=pdl_C_input_zero[j]
C_val=C_val_zero
C_rms=pdl_C_rms[j]
C_val=C_val+2*fact_q*C_rms
if C_val > 1:
C_val=1
if C_val < 0:
C_val=0
pdl_C_now[j]=C_val
else:
pdl_C_now=np.copy(pdl_C_input)
# We normalize!
sum_J=np.sum(pdl_C_now)
pdl_C_now=pdl_C_now/sum_J
# print '______A'
# print pdl_C_now
# print '______A'
for j in range(0, nf):
pdl_model_j=pdl_model[:,j]
pdl_model_no_mask_j=pdl_model_no_mask[:,j]
val=pdl_C_now[j]
y_model_now=y_model_now+val*pdl_model_j
y_model_no_mask_now=y_model_no_mask_now+val*pdl_model_no_mask_j
dim_now=y_model_now.shape
j1=int(0.47*n_unc)
j2=int(0.53*n_unc)
a_norm=[]
b_norm=[]
j_a=0
band=0
for j in range(j1, j2):
if band == 0:
a_norm.extend([flux_unc[j]])
b_norm.extend([y_model_now[j]])
band=1
else:
a_norm[j_a]=flux_unc[j]
b_norm[j_a]=y_model_now[j]
if a_norm[j_a] > 0 and b_norm[j_a] > 0:
j_a=j_a+1
band=0
med_b=np.median(b_norm)
if med_b != 0:
med_norm=np.median(a_norm)/np.median(b_norm)
else:
med_norm=1
MED_NORM.extend([med_norm])
y_model_now=y_model_now*med_norm
y_model_no_mask_now=y_model_no_mask_now*med_norm
##############################
# CHISQ VALUE
chi=0
chi2=0
NFREE=0
have_error=1
out_spec=np.zeros(n_unc)
chi_sec=np.zeros(n_unc)
ran.seed(None)
pdl_rand_noise=2*ran.rand(n_unc)-1
for j in range(0, n_unc):
out_spec[j]=y_model_now[j]
chi_sec[j]=0
if flux_unc[j] != 0 and out_spec[j] != 0 and e_flux_unc[j] != 0:
rnd=0
chi=chi+masked[j]*((flux_masked[j]+rnd-out_spec[j])**2)/(e_flux_unc[j])**2
if have_error == 0:
chi_sec[j]=masked[j]*((flux_unc[j]+rnd-out_spec[j])**2)/abs(out_spec[j])
else:
chi_sec[j]=masked[j]*((flux_unc[j]+rnd-out_spec[j])**2)/(e_flux_unc[j])**2
NFREE=NFREE+1
chi_sq=chi
if NFREE > 0:
chi_sq=(chi_sq/(NFREE))**0.5
out_ps_now="junkMC1";
title="X = "+str(chi_sq)+" Q="+str(fact_q)
plot_results(plot,wave_unc,[flux_unc,out_spec,flux_unc-out_spec,y_model_end,e_flux_unc],out_ps_now,title)
if chi_sq < 1.1*chi_sq_min_now:
pdl_C_input=pdl_C_now
fact_q=0.95*fact_q
j_mc=0
if fact_q < 0.05 and i_mc > 1:
j_mc=n_mc
if chi_sq < chi_sq_min_now:
chi_sq_min_now=chi_sq
coeffs[:,0] = pdl_C_now
sum_JUNK=np.sum(pdl_C_now)
y_model_end=y_model_now
y_model_no_mask_end=y_model_no_mask_now
nf_1=nf-1
#print '______A1'
#print pdl_C_now[0:nf]
#print '______A1'
coeffs_cat[0:nf,ini_cat]=pdl_C_now[0:nf]
coeffs_cat[nf,ini_cat]=chi_sq
pdl_model_spec_cat[:,ini_cat]=y_model_no_mask_end
ini_cat=ini_cat+1
if ini_cat > n_mc-2:
j_mc=n_mc
#
# We construct the average model
# and the average coefficients
#
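    # Average model: every stored solution with chi-square below 1.1x the best
    # value contributes with weight 1/chi; if none qualifies (SUM_W left at its
    # default), the first linear model (pdl_1st_model) is used instead.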
#print '____B'
#print coeffs
#print '____B'
#print coeffs_cat
#print '____C'
model_spec=np.zeros(n_unc)
res_spec=np.zeros(n_unc)
model_spec_min=np.zeros(n_unc)
out_spec_now=np.zeros(n_unc)
SUM_W=0
out_coeffs=np.zeros([nf,1])
out_coeffs_e=[]
N_CASES=0
pdl_model_final=np.zeros(n_unc)
for J in range(0, ini_cat):
CHI=coeffs_cat[nf,J]
if CHI < 1.1*chi_sq_min_now:
for j in range(0, n_unc):
out_spec_now[j]=out_spec_now[j]+(pdl_model_spec_cat[j,J])/CHI
if N_CASES > 0:
out_coeffs=np.concatenate((out_coeffs,np.zeros([nf, 1])),axis=1)
for j in range(0, nf):
val=coeffs_cat[j,J]
out_coeffs[j][N_CASES]=val
N_CASES=N_CASES+1
SUM_W=SUM_W+1/CHI
if SUM_W == 0:
SUM_W=1
#
# No better solution found than the 1st one!!!
#
for j in range(0, n_unc):
val=pdl_1st_model[j]
model_spec[j]=val;
out_spec[j]=val;
res_spec[j]=flux_unc[j]-model_spec[j]
model_spec_min[j]=model_spec[j]
else:
for j in range(0, n_unc):
model_spec[j]=out_spec_now[j]/SUM_W
out_spec[j]=out_spec_now[j]/SUM_W
res_spec[j]=flux_unc[j]-model_spec[j]
model_spec_min[j]=model_spec[j]
min_coeffs=coeffs
for j in range(0, nf):
tmp=np.zeros(N_CASES)
for J in range(0, N_CASES):
tmp[J]=out_coeffs[j][J]
val=np.mean(tmp)
sigma=np.std(tmp)
sigma_MC=pdl_C_rms[j]
sigma=np.sqrt(sigma**2+sigma_MC**2)
sum_C=np.sum(pdl_C_input)
old_val=coeffs[j,0]
coeffs[j,0]=val
coeffs[j,1]=sigma
coeffs[j,2]=old_val
chi_sq=chi_sq_min_now
if chi_sq < MIN_CHISQ:
MIN_CHISQ=chi_sq
age_min=0
met_min=0
Av_min=0
age_min_mass=0
met_min_mass=0
Av_min_mass=0
norm_C=0
norm_C_mass=0
for k in range(0, nf):
dust=10**(-0.4*Av[k]*dust_rat)
C=coeffs[k,0]
norm_C=norm_C+C
norm_C_mass=norm_C_mass+C*ml[k]
for k in range(0, nf):
C=coeffs[k,0]
#if norm_C > 0:
coeffs_N[k,0]=C/norm_C
#if norm_C_mass > 0:
coeffs_NM[k,0]=C/norm_C_mass
if norm_C > 0:
age_min=age_min+C*np.log10(age_mod[k])/norm_C
met_min=met_min+C*met_mod[k]/norm_C
Av_min=Av_min+C*Av[k]/norm_C
CN=C/norm_C
C_now=C*med_norm
if norm_C_mass > 0:
age_min_mass=age_min_mass+C*np.log10(ml[k]*age_mod[k])/norm_C_mass
met_min_mass=met_min_mass+C*ml[k]*met_mod[k]/norm_C_mass
Av_min_mass=Av_min_mass+C*ml[k]*Av[k]/norm_C_mass
age_min=10**(age_min)
age_min_mass=10**(age_min_mass)
#name=unc_file+", "
scale="1"
pdl_age_mod=np.array(age_mod)
pdl_met_mod=np.array(met_mod)
pdl_ml=np.array(ml)
pdl_Av=np.array(Av)
pdl_model_spec_min=np.array(model_spec_min)
pdl_res=np.array(res_spec)
out_ps_now="junkMC2"
title="X="+str(chi_sq)+" Av="+str(Av[0])+" z="+str(redshift)+" sigma="+str(sigma)
if plot > 1:
plot_results(plot,wave_unc,[pdl_flux_masked,pdl_model_spec_min,pdl_res],out_ps_now,title)
return [chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_model_spec_min,pdl_res,pdl_C_input_zero,pdl_C_rms]
def fit_elines_grad_rnd_new_guided(pdl_wave,pdl_flux,pdl_e_flux,n_mod,chi_goal,d_chi_goal,typef,a,ia,a0,a1,link,n_rnd,pdl_masked,deft,SCALE_INI,g_v,g_d):
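    # Guided variant of fit_elines_grad_rnd_new: when g_v (velocity guide) or g_d
    # (dispersion guide) is zero, the corresponding random-search phase is reduced
    # to only 3 trials.  The routine returns [a_out, chi_sq_now, pdl_model]
    # instead of the four-element list of the unguided version.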
a_out=np.zeros([n_mod, 9])
SCALE=SCALE_INI
n_mc=n_rnd
cont=0
dims=pdl_flux.shape
if len(dims) == 1:
NX=dims[0]
# First Guess
n_points=np.sum(pdl_masked)
pdl_model_0=np.zeros(NX)
pdl_model_cont_0=np.zeros(NX)
for i in range(0, n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a)
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
chi_sq_ini=chi_sq
a_out=copy_a(n_mod,a)
a_now=np.zeros([n_mod,9])
pdl_rnd=ran.randn(n_mod*9*n_mc)
pdl_rnd_lin=0.8+0.4*ran.rand(n_mod*9*n_mc)
#
# 1st we derive the redshift!
#
if g_v == 0:
new_mc=3
else:
new_mc=int(n_mc/2.0)
for ii in range(0, new_mc):
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i)+(9*n_mod*ii))]#*ii]
rnd_lin=pdl_rnd_lin[(j+(9*i)+(9*n_mod*ii))]#*ii]
if ia[i][j] == 1:
if link[i][j] == -1:
if typef[i] != "poly1d\n":
if j == 3:
a_now[i][j]=a0[i][j]+rnd_lin*(a1[i][j]-a0[i][j])#*ii/new_mc
else:
a_now[i][j]=a_out[i][j]#+$SCALE*$a_out[$i][$j]*$rnd;
else:
a_now[i][j]=a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < a0[i][j]:
a_now[i][j]=a0[i][j]
if a_now[i][j] > a1[i][j]:
a_now[i][j]=a1[i][j]
else:
k=link[i][j]-1
method=a1[i][j]
if method == 0:
a_now[i][j]=a_now[k][j]+a0[i][j]
else:
a_now[i][j]=a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
if n_mod_free == 0:
pdl_model_n=pdl_tmp
else:
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
else:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
ik=link[i1][1]-1
t=pdl_model_n[ik,:]
pdl_tmp=pdl_tmp*a0[i1][1]
pdl_model_n[ik,:]= t +pdl_tmp
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
pdl_tmp=create_single_model_poly(pdl_wave,j)
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
pdl_grandom=ran.randn(NX)
pdl_flux_fit=pdl_flux-cont+pdl_grandom*pdl_e_flux
dim_n=pdl_model_n.shape
if dim_n[1] > 1:
[pdl_model_0, coeffs_0] = linfit1d(pdl_flux_fit,pdl_model_n)#,{Weights=>1/$pdl_e_flux});#,{Weights=>1/$pdl_e_flux});
else:
pdl_model_n[np.where(pdl_model_n == 0)[0]]=1.0
pdl_rat=pdl_flux_fit/pdl_model_n
pdl_rat[np.isnan(pdl_rat)]=0
statst=my.stats(pdl_rat)
coeffs_0=np.ones(1)
pdl_model_0=statst[2]*pdl_model_n
coeffs_0=statst[2]*coeffs_0
pdl_model_0=pdl_model_0+cont
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
a_now[i1][1]=coeffs_0[n_mod_free]
if a_now[i1][1] < a0[i1][1]:
a_now[i1][1]=a0[i1][1]
if a_now[i1][1] > a1[i1][1]:
a_now[i1][1]=a1[i1][1]
n_mod_free=n_mod_free+1
else:
k=link[i1][1]-1
method=a1[i1][1]
if method == 0:
a_now[i1][1]=a_now[k][1]+a0[i1][1]
else:
a_now[i1][1]=a_now[k][1]*a0[i1][1]
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
val=coeffs_0[n_mod_free]
a_now[i1][j]=val
n_mod_free=n_mod_free+1
pdl_model_0=np.zeros(NX)
pdl_model_cont_0=np.zeros(NX)
for i in range(0, n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a_now)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
pdl_a_now=a_now
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq <= chi_sq_ini:
a_out=copy_a(n_mod,a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
chi_sq_ini=1e12
a_now=copy_a(n_mod,a_out)
#
# 2nd we derive the sigma
#
if g_d == 0:
new_mc=3
else:
new_mc=int(n_mc)
for ii in range(0, new_mc):
#
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i)+(9*n_mod*ii))]#*$ii);
rnd_lin=pdl_rnd_lin[(j+(9*i)+(9*n_mod*ii))]#*$ii);
if ia[i][j] == 1:
if link[i][j] == -1:
if typef[i] != "poly1d\n":
if j == 2:
a_now[i][j]=a0[i][j]+rnd_lin*(a1[i][j]-a0[i][j])#*$ii/$new_mc
else:
a_now[i][j]=a_out[i][j]
else:
a_now[i][j]=a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < a0[i][j]:
a_now[i][j]=a0[i][j]
if a_now[i][j] > a1[i][j]:
a_now[i][j]=a1[i][j]
else:
k=link[i][j]-1
method=a1[i][j]
if method == 0:
a_now[i][j]=a_now[k][j]+a0[i][j]
else:
a_now[i][j]=a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
if n_mod_free == 0:
pdl_model_n=pdl_tmp
else:
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
else:
pdl_tmp=create_single_model_one(pdl_wave,i1,typef,a_now)
i2=link[i1][1]-1
t=pdl_model_n[i2,:]
pdl_tmp=pdl_tmp*a0[i1][1]
pdl_model_n[i2,:]= t +pdl_tmp
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
pdl_tmp=create_single_model_poly(pdl_wave,j)
pdl_model_n=np.vstack((pdl_model_n,pdl_tmp))
n_mod_free=n_mod_free+1
pdl_grandom=ran.randn(NX)
pdl_flux_fit=pdl_flux-cont+pdl_grandom*pdl_e_flux
dim_n=pdl_model_n.shape
if dim_n[1] > 1:
[pdl_model_0, coeffs_0] = linfit1d(pdl_flux_fit,pdl_model_n)
else:
pdl_model_n[np.where(pdl_model_n == 0)[0]]=1.
pdl_rat=pdl_flux_fit/pdl_model_n
pdl_rat[np.isnan(pdl_rat)]=0
statst=my.stats(pdl_rat)
coeffs_0=np.ones(1)
pdl_model_0=statst[2]*pdl_model_n
coeffs_0=statst[2]*coeffs_0
pdl_model_0=pdl_model_0+cont
n_mod_free=0
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
a_now[i1][1]=coeffs_0[n_mod_free]
n_mod_free=n_mod_free+1
else:
k=link[i1][1]-1
method=a1[i1][1]
if method == 0:
a_now[i1][1]=a_now[k][1]+a0[i1][1]
else:
a_now[i1][1]=a_now[k][1]*a0[i1][1]
if typef[i1] == "poly1d\n":
for j in range(0, 9):
if ia[i1][j] == 1:
val=coeffs_0[n_mod_free]
a_now[i1][j]=val
n_mod_free=n_mod_free+1
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
pdl_a_now=a_now
pdl_chi_now=pdl_masked*((pdl_flux-pdl_model_0[:,0])**2.0)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq <= chi_sq_ini:
a_out=copy_a(n_mod,a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
chi_sq_ini=1e12
a_now=copy_a(n_mod,a_out)
# We start the fitting loop!
ii=0
SCALE_IN=SCALE
new_mc=int(n_mc/3)
while ii < new_mc:
#
        # We slightly change the parameters
#
for i in range(0, n_mod):
for j in range(0, 9):
rnd=pdl_rnd[(j+(9*i)+(9*n_mod*ii))]#*$ii)
rnd_lin=pdl_rnd_lin[(j+(9*i)+(9*n_mod*ii))]#*$ii);
if ia[i][j] == 1:
if link[i][j] == -1:
A1=a1[i][j]
A0=a0[i][j]
if a1[i][j] > 1.3*a_out[i][j]:
A1=1.3*a_out[i][j]
if a0[i][j] < 0.7*a_out[i][j]:
A0=0.7*a_out[i][j]
if typef[i] == "eline\n":
if j == 3 or j == 2:
a_now[i][j]=a_out[i][j]+SCALE*rnd*(A1-A0)/(5*new_mc)
else:
a_now[i][j]=a_out[i][j]+SCALE*a_out[i][j]*rnd
if a_now[i][j] < A0:
a_now[i][j]=A0
if a_now[i][j] > A1:
a_now[i][j]=A1
else:
# No variation!
a_now[i][j]=a_out[i][j]#+$SCALE*$a_out[$i][$j]*$rnd*0.0001;
else:
k=link[i][j]-1
method=a1[i][j]
if method == 0:
a_now[i][j]=a_now[k][j]+a0[i][j]
else:
a_now[i][j]=a_now[k][j]*a0[i][j]
else:
a_now[i][j]=a_out[i][j]
for i1 in range(0, n_mod):
if typef[i1] == "eline\n":
if ia[i1][1] == 1:
if link[i1][1] == -1:
if a_now[i1][1] < a0[i1][1]:
a_now[i1][1]=a0[i1][1]
if a_now[i1][1] > a1[i1][1]:
a_now[i1][1]=a1[i1][1]
# MC test
pdl_model_0=np.zeros(NX)
pdl_model_cont_0=np.zeros(NX)
for i in range(0, n_mod):
pdl_tmp=create_single_model(pdl_wave,i,typef,a_now)
pdl_model_0=pdl_model_0+pdl_tmp
pdl_model_cont_0=create_single_model(pdl_wave,(n_mod-1),typef,a_now)
########################
# 2014.11.11 LAST CHANGE
#
pdl_grandom=ran.randn(NX)
pdl_flux_fit=pdl_flux-cont+pdl_grandom*pdl_e_flux
pdl_chi_now=pdl_masked*((pdl_flux_fit-pdl_model_0)**2)/(pdl_e_flux**2)
chi_sq=np.sum(pdl_chi_now)
chi_sq=(chi_sq/(n_points-n_mod-1))**0.5
if chi_sq < chi_sq_ini:
a_out=copy_a(n_mod,a_now)
chi_sq_ini=chi_sq
pdl_model=pdl_model_0
pdl_model_cont=pdl_model_cont_0
SCALE=SCALE*0.99
if SCALE < 0.1*SCALE_IN:
SCALE=SCALE_IN*0.1
else:
SCALE=SCALE_IN
        if abs(chi_sq-chi_sq_ini) < d_chi_goal or chi_sq_ini < chi_goal:
ii=n_mc
ii=ii+1
chi_sq_now=chi_sq_ini
return [a_out,chi_sq_now,pdl_model]
|
Delosari/dazer
|
bin/lib/ssp_functions/ssp_Hector_Fit3D_tools.py
|
Python
|
mit
| 81,907
|
[
"Gaussian"
] |
d794bd23aa7d7265097dc2c20c511ef5e16fd44f7d49a8d272b096a7694b34a1
|
from setuptools import setup, find_packages
import imp
version = imp.load_source('muda.version', 'muda/version.py')
setup(
name='muda',
version=version.version,
description='Musical data augmentation',
author='Brian McFee',
author_email='brian.mcfee@nyu.edu',
url='http://github.com/bmcfee/muda',
download_url='http://github.com/bmcfee/muda/releases',
packages=find_packages(),
package_data={'': ['deformers/data/*']},
long_description="""Musical data augmentation.""",
classifiers=[
"License :: OSI Approved :: ISC License (ISCL)",
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords='audio music sound',
license='ISC',
install_requires=[
'librosa>=0.8',
'pyrubberband>=0.1',
'pandas',
'jams>=0.3.0',
'pysoundfile>=0.8',
'six',
'jsonpickle',
],
extras_require={
'docs': ['numpydoc'],
'tests': ['pytest < 4', 'pytest-cov==2.9.0'],
}
)
|
bmcfee/muda
|
setup.py
|
Python
|
isc
| 1,475
|
[
"Brian"
] |
751dfb95adc4937059153d408bf197a8dbcd74e44a276bffcdaa0296a6d077f2
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bowtie(MakefilePackage):
"""Bowtie is an ultrafast, memory efficient short read aligner
for short DNA sequences (reads) from next-gen sequencers."""
homepage = "https://sourceforge.net/projects/bowtie-bio/"
url = "https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.1.1/bowtie-1.2.1.1-src.zip"
version('1.2.1.1', 'ec06265730c5f587cd58bcfef6697ddf')
variant("tbb", default=False, description="Use Intel thread building block")
depends_on("tbb", when="+tbb")
def edit(self, spec, prefix):
# FIXME: Edit the Makefile if necessary
# FIXME: If not needed delete this function
# makefile = FileFilter('Makefile')
# makefile.filter('CC = .*', 'CC = cc')
return
|
EmreAtes/spack
|
lib/spack/docs/tutorial/examples/Makefile/1.package.py
|
Python
|
lgpl-2.1
| 2,011
|
[
"Bowtie"
] |
79d62789cb99a8588c840d07523dc8c10406f2d947348c8c9836a0a798fabf01
|
import sys
import getopt
import random
import time
import numpy
import math
import arcpy
import csv
from arcpy import env
import my_tools as tools
# Efficiency: 0' 10'' importing packages
# 6' 17'' retrieving day data for January 2010 (500,000 rows read, 1,326 rows/sec), assembly of shapefiles and merging to month file
# 1' 06'' density analysis, simulating density in 99 random distributions of crime (0.6 secs/simulation) and envelope hypothesis testing
# Note: file names of ArcGIS datasets cannot have more than 13 characters
try:
opts = getopt.getopt(sys.argv[1:],"m:",["model="])
except getopt.GetoptError:
print '\nUsage: Exercise3.py -m <true|false>'
sys.exit(2)
if len(opts[0]) == 0:
print '\nWarning: \'model\' argument could not be solved. Empirical density will be computed but not modeled'
modeling_process = False
elif opts[0][0][1] in ['true','True','TRUE']:
print '\nWarning: \'model\' argument set to True. Empirical density will be computed and modeled'
modeling_process = True
elif opts[0][0][1] in ['false','False','FALSE']:
print '\nWarning: \'model\' argument set to False. Empirical density will be computed but not modeled'
modeling_process = False
else:
print '\nWarning: \'model\' argument could not be solved. Empirical density will be computed but not modeled'
modeling_process = False
arcpy.CheckOutExtension("Spatial")
env.overwriteOutput = True
env.workspace = r"C:\Python27\ArcGIS10.5\data\chicago" # Set workspace (raw string avoids accidental escape sequences)
env.scratchWorkspace = r"C:\Python27\ArcGIS10.5\data"
raw_data = "Chicago_crime_counts_2009-2011.csv"
root_name = "chicago_"
'''
Import raw 2010 data to shapefile
'''
year = 2010
segment = 5
progress = 1
sys.stdout.write("\nProcessing days of January, " + str(year) + ": ")
for year_day in range(1,32): # Only January 2010 is assessed
percentile = int(year_day/float(31)*100)
if percentile/segment >= progress:
sys.stdout.write(str(percentile/segment*segment) + '% ')
progress = percentile/segment + 1
root_yday_name = root_name + "D" + str(year_day)
with open(env.workspace + "\\" + root_yday_name + ".csv", 'w') as out_csv:
writer = csv.writer(out_csv, delimiter=',',quotechar='"')
writer.writerow(["ID","Case","Date","Type","X","Y","Lat","Long"])
with open(env.workspace + "\\" + raw_data, 'r') as raw_csv:
reader = csv.reader(raw_csv, delimiter=',', quotechar='"')
next(raw_csv)
for row in reader:
time_structure = time.strptime(row[2],"%m/%d/%Y %I:%M:%S %p") ## TO-DO: Print the object and study its data
if time_structure[0] == year and time_structure[7] == year_day and row[3] == 'NARCOTICS':
writer.writerow(row)
arcpy.MakeXYEventLayer_management(env.workspace + "\\" + root_yday_name + ".csv", "X", "Y", root_yday_name, "", "")
arcpy.FeatureClassToFeatureClass_conversion(root_yday_name, env.workspace, root_yday_name)
'''
Merge weekly crime data by month
'''
map_names = arcpy.ListFeatureClasses(root_name + "D*")
month_length = [31,28,31,30,31,30,31,31,30,31,30,31]
yday = 1
month = 1
while yday < 32: # Only January 2010 is assessed
maps_to_merge = []
indexes_to_use = range(yday-1,yday-1+month_length[month-1])
for i in indexes_to_use:
maps_to_merge.append(map_names[i])
arcpy.Merge_management(maps_to_merge,root_name + "M" + str(month))
yday += month_length[month-1]
month += 1
'''
Compute monthly density and significance of local density signals with Monte Carlo simulation
'''
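# Envelope test: the empirical kernel density is compared against n_simulations
# kernel densities computed from random (CSR) point patterns with the same number
# of events; cells where the empirical value falls in either alpha/2 tail of the
# simulated envelope are flagged as anomalous.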
foot = 0.3048
cell_size = 300/foot # Side cell resolution
random.seed(time.time())
chicago_neighborhoods = 'Neighborhoods_2012b.shp' # Polygon shapefile representing the extent of the study area
city_extent = arcpy.Dissolve_management(chicago_neighborhoods,"chicago_extent")
city_extent_rast = arcpy.FeatureToRaster_conversion(city_extent, "Id", "chicago_ext", cell_size)
n_simulations = 999 # Ideally 199 at least for two-tailed alpha = 0.01
alpha = 0.01 # Portion in the envelope of simulated intensities to be deemed anomalous, as alpha == percent/100
mbr_poly = arcpy.MinimumBoundingGeometry_management(city_extent, "chicago_mbr", "ENVELOPE", "ALL")
mbr_rast = arcpy.FeatureToRaster_conversion(mbr_poly, "Id", "chicago_mbr", cell_size)
min_x = float(arcpy.GetRasterProperties_management(mbr_rast,"LEFT").getOutput(0))
max_x = float(arcpy.GetRasterProperties_management(mbr_rast,"RIGHT").getOutput(0))
min_y = float(arcpy.GetRasterProperties_management(mbr_rast,"BOTTOM").getOutput(0))
max_y = float(arcpy.GetRasterProperties_management(mbr_rast,"TOP").getOutput(0))
n_cols = int(arcpy.GetRasterProperties_management(mbr_rast,"COLUMNCOUNT").getOutput(0))
n_rows = int(arcpy.GetRasterProperties_management(mbr_rast,"ROWCOUNT").getOutput(0))
n_cells = n_cols * n_rows
envelope = numpy.zeros((n_simulations+1,n_cells))
for month in range(1,2): # Only January 2010 is assessed
'''
Compute local point density with 2-dimensional isotropic Gaussian kernel of 1000-meter radius
'''
input_data = root_name + "M" + str(month) + ".shp"
n_monthly_events = arcpy.GetCount_management(input_data)
population_field = "NONE"
output_intensity = arcpy.sa.KernelDensity(env.workspace + "\\" + input_data, population_field, cell_size,1000/foot,'SQUARE_FEET')
output_map = "lambda_M" + str(month)
output_intensity.save(env.workspace + "\\" + output_map)
if modeling_process:
'''
Test anomaly of empirical local density compared to random distributions of crime events
'''
sys.stdout.write("\n\nProcessing simulations: ")
segment = 5
progress = 1
for simulation in range(1,n_simulations+1):
percentile = int(simulation/float(n_simulations)*100)
if percentile/segment >= progress:
sys.stdout.write(str(percentile/segment*segment) + '% ')
progress = percentile/segment + 1
arcpy.CreateRandomPoints_management(env.workspace,"simulation",city_extent,"",n_monthly_events)
sim_intensity = arcpy.sa.KernelDensity(env.workspace + "\\" + 'simulation.shp', population_field, cell_size, 1000/foot, 'SQUARE_FEET')
            envelope[simulation] = arcpy.RasterToNumPyArray(sim_intensity,arcpy.Point(min_x, min_y),n_cols,n_rows).ravel() # Append simulated intensity to the envelope of possible simulations
        envelope[-1] = arcpy.RasterToNumPyArray(output_intensity,arcpy.Point(min_x, min_y),n_cols,n_rows).ravel() # Append empirical intensity to the envelope of possible simulations
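        # For each cell, pseudoalpha = 1 - rank of the empirical intensity within
        # the envelope (divided by n_simulations+1); values in either alpha/2 tail
        # reject the CSR hypothesis for that cell (CSR_test = 1).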
pseudoalpha = numpy.zeros(n_cells) # For each cell in output map:
CSR_test = numpy.zeros(n_cells,dtype='int')
for cell in range(n_cells):
pseudoalpha[cell] = 1-sum(envelope[:,cell] < envelope[-1, cell])/float(n_simulations+1) # Compute the rank of the empirical intensity with regard to simulated intensities
if pseudoalpha[cell] <= float(alpha)/2 or pseudoalpha[cell] >= 1-float(alpha)/2: # Solve the test based on the 1-alpha confidence interval
CSR_test[cell] = 1
pseudoalpha = numpy.split(pseudoalpha,n_rows)
pseudoalpha = numpy.array(pseudoalpha)
pseudoalpha_raster = arcpy.NumPyArrayToRaster(pseudoalpha,
lower_left_corner=arcpy.Point(min_x, min_y),
x_cell_size=cell_size)
pseudoalpha_raster = pseudoalpha_raster + city_extent_rast
pseudoalpha_raster.save("pseudoalpha")
CSR_test = numpy.split(CSR_test,n_rows)
CSR_test = numpy.array(CSR_test)
CSR_test_raster = arcpy.NumPyArrayToRaster(CSR_test,
lower_left_corner=arcpy.Point(min_x, min_y),
x_cell_size=cell_size)
CSR_test_raster = CSR_test_raster + city_extent_rast
CSR_test_raster.save("CSR_test")
raster_table = arcpy.BuildRasterAttributeTable_management(CSR_test_raster, "Overwrite")
    n_cells = 0
    alternative_space = 0
with arcpy.da.SearchCursor(raster_table, ['VALUE', 'COUNT']) as cursor:
for row in cursor:
n_cells += row[1]
if row[0] == 1:
alternative_space = row[1]
print '\n\nFrequency of anomalous space (alpha = ' + str(alpha) + ' | simulations = ' + str(n_simulations) + '): ' + str(round(alternative_space/float(n_cells),4))
print '\nDone'
|
blt2589/Chicago
|
Exercise3.py
|
Python
|
mit
| 9,303
|
[
"Gaussian"
] |
cbbd00799368ead8a823dbe076a1e72ef54c4ea16754540b7e37542060d2ff04
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 02:55:49 2018
@author: Yongsoo Yang, UCLA Physics & Astronomy
yongsoo.ysyang@gmail.com
"""
from .fparameters_python import fparameters_python
import numpy as np
# main function for calculating R1 factor
def calc_R1_function_python(atomPos,atomType,Projections,Angles,Resolution,CropHalfWidth,VolSize,BF_Array,HTFact_Array,Axis_array):
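    # For every projection angle the atomic model is rotated (three quaternion
    # rotations following Axis_array), a projection is synthesized from the
    # rotated coordinates and the H/B factors, cropped to the size of the
    # measured projections, and compared to the data through the normalized
    # R factor (calcR_norm_YY_python).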
# rename variables
currPos = atomPos
currAtom = atomType
currProjs = Projections
currAngles =Angles
volsize = VolSize
# initialize array
calcProjs = np.zeros(np.shape(currProjs))
# loop over all projections
for j in range(currAngles.shape[0]):
# calculate rotation matrix based on current input angles and axis convention
currMAT1 = MatrixQuaternionRot_python(np.array(Axis_array[0]),currAngles[j,0])
currMAT2 = MatrixQuaternionRot_python(np.array(Axis_array[1]),currAngles[j,1])
currMAT3 = MatrixQuaternionRot_python(np.array(Axis_array[2]),currAngles[j,2])
MAT = currMAT1*currMAT2*currMAT3;
# apply rotation matrix to the input atomic coordinates
Model = (np.transpose(MAT) * np.matrix(currPos)).A
# calculate projection based on the atomic model
cProj = My_create_volProj_from_model_python(Model, currAtom, HTFact_Array, BF_Array, volsize, Resolution, CropHalfWidth)
# determine shift value and crop indices for cropping the calculated projections
SizeDiff0 = cProj.shape[0] - currProjs.shape[0]
if SizeDiff0%2 == 0:
AddShift0 = SizeDiff0 / 2
else:
AddShift0 = (SizeDiff0-1) / 2
SizeDiff1 = cProj.shape[1] - currProjs.shape[1]
if SizeDiff1%2 == 0:
AddShift1 = SizeDiff1 / 2
else:
AddShift1 = (SizeDiff1-1) / 2
CropInd0 = np.arange(currProjs.shape[0])
CropInd0 = list(CropInd0 + int(AddShift0) - 1)
CropInd1 = np.arange(currProjs.shape[1])
CropInd1 = list(CropInd1 + int(AddShift1) - 1)
# crop the calculated projection
calcProjs[:,:,j] = cProj[np.ix_(CropInd0,CropInd1)]
# calculate R factor
Rfac = calcR_norm_YY_python(currProjs,calcProjs)
return [calcProjs,Rfac]
# main function for calculating R1 factor using individual atomic scattering factor
def calc_R1_function_indivFA_python(atomPos,atomType,Projections,Angles,Resolution,BF_Array,HTFact_Array,Axis_array,AtomicNumbers, zDir):
# rename variables
currPos = atomPos
currAtom = atomType
currProjs = Projections
currAngles =Angles
# volsize = VolSize
volsize = [int(np.max(np.shape(currProjs)) + 10)]
CropHalfWidth = int(np.round(np.sqrt(np.average(BF_Array)/8)/np.pi/Resolution*3)+1)
# initialize array
calcProjs = np.zeros(np.shape(currProjs))
# loop over all projections
for j in range(currAngles.shape[0]):
# calculate rotation matrix based on current input angles and axis convention
currMAT1 = MatrixQuaternionRot_python(np.array(Axis_array[0]),currAngles[j,0])
currMAT2 = MatrixQuaternionRot_python(np.array(Axis_array[1]),currAngles[j,1])
currMAT3 = MatrixQuaternionRot_python(np.array(Axis_array[2]),currAngles[j,2])
MAT = currMAT1*currMAT2*currMAT3;
# apply rotation matrix to the input atomic coordinates
Model = (np.transpose(MAT) * np.matrix(currPos)).A
# calculate projection based on the atomic model
cProj = My_create_volProj_from_model_indivFA_python(Model, currAtom, HTFact_Array, BF_Array, AtomicNumbers, volsize, Resolution, CropHalfWidth, zDir)
# determine shift value and crop indices for cropping the calculated projections
SizeDiff0 = cProj.shape[0] - currProjs.shape[0]
if SizeDiff0%2 == 0:
AddShift0 = SizeDiff0 / 2
else:
AddShift0 = (SizeDiff0-1) / 2
SizeDiff1 = cProj.shape[1] - currProjs.shape[1]
if SizeDiff1%2 == 0:
AddShift1 = SizeDiff1 / 2
else:
AddShift1 = (SizeDiff1-1) / 2
CropInd0 = np.arange(currProjs.shape[0])
CropInd0 = list(CropInd0 + int(AddShift0) - 1)
CropInd1 = np.arange(currProjs.shape[1])
CropInd1 = list(CropInd1 + int(AddShift1) - 1)
# crop the calculated projection
calcProjs[:,:,j] = cProj[np.ix_(CropInd0,CropInd1)]
# calculate R factor
Rfac = calcR_norm_YY_python(currProjs,calcProjs)
Rfac = np.round(100*Rfac,decimals=1)
return [calcProjs,Rfac]
# function for calculating projections from the atomic model and H, B factors
def My_create_volProj_from_model_python(model, atomtype, Heights, Bfactors, volsize, Res, CropHalfWidth):
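    # Each atom is added to a padded 2D canvas as a Gaussian of height
    # Heights[type] and width Bfactors[type]/(pi^2 * Res^2), with its z extent
    # collapsed by summing a 1D Gaussian along z; the per-type canvases are then
    # multiplied in Fourier space by an averaged electron scattering factor
    # (Z = 78 and 26) and summed to give the final projection.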
# rescale model based on pixel resolution
model = model / Res
# rescale peak heights and B factors
FHeights = Heights
FWidths = Bfactors / np.pi**2 / Res**2
# initialize xyz array for the volume
if len(volsize) == 3:
x = np.arange(volsize[0]) - np.round((volsize[0]+1)/2.) + 1
y = np.arange(volsize[1]) - np.round((volsize[1]+1)/2.) + 1
z = np.arange(volsize[2]) - np.round((volsize[2]+1)/2.) + 1
elif len(volsize) == 1:
x = np.arange(volsize[0]) - np.round((volsize[0]+1)/2.) + 1
y = x
z = x
else:
print('volsize should be either length 3 or length 1!')
# a variable for the projection size
sizeX = [len(x), len(y)]
# check if there's any atom outside the projection size
inInd = np.logical_and(np.logical_and(np.logical_and(np.logical_and (np.logical_and(model[0,:] >= np.min(x) , model[0,:] <= np.max(x)) ,
model[1,:] >= np.min(y)), model[1,:] <= np.max(y) ),
model[2,:] >= np.min(z)), model[2,:] <= np.max(z))
# take only the atoms inside the projection
calcModel = model[:,inInd]
calcAtomtype = atomtype[:,inInd]
# initialize projection array
finalProj_padded = np.zeros( (len(x) + (CropHalfWidth+1)*2, len(y) + (CropHalfWidth+1)*2, len(Heights)))
    # projection center position
cenPos = np.round((np.array(finalProj_padded.shape)+1)/2.)
# local cropping indices for every atom
cropIndRef = np.arange(-CropHalfWidth,CropHalfWidth+1)
# meshgrid indices for local cropping
[cropX,cropY] = np.meshgrid(cropIndRef,cropIndRef)
cropX = cropX.T
cropY = cropY.T
#loop over all atoms in the model
for i in range(calcModel.shape[1]):
# obtain local cropping indices for current atom
currPos1 = calcModel[0:2,i] + cenPos[0:2]
currRndPos = np.round(currPos1)
cropInd1 = cropIndRef + currRndPos[0] -1
cropInd2 = cropIndRef + currRndPos[1] -1
# crop the local region for current atom
CropProj = finalProj_padded[np.ix_(list(cropInd1.astype(int)),list(cropInd2.astype(int)),list([calcAtomtype[0,i].astype(int)]))]
# sub-pixel position difference for current atom from the center pixel
diffPos = currPos1-currRndPos;
diffPosZ = calcModel[2,i] - np.round(calcModel[2,i])
# calculate Gaussian profile based on the H and B factor
gaussCalc = (FHeights[calcAtomtype[0,i]]*np.exp( -1.*( (cropX-diffPos[0])**2 + (cropY-diffPos[1])**2 ) / FWidths[calcAtomtype[0,i]] )).reshape(CropProj.shape)
gaussZcalc = (np.exp(-1.*(cropIndRef - diffPosZ)**2 / FWidths[calcAtomtype[0,i]] ))
# update the local region in the projection
finalProj_padded[np.ix_(list(cropInd1.astype(int)),list(cropInd2.astype(int)),list([calcAtomtype[0,i].astype(int)]))] = CropProj + gaussCalc*np.sum(gaussZcalc)
# initialize final projection array
finalProj_summed = np.zeros( (len(x), len(y)) )
# initialize Fourier indices
kx = np.arange(1,finalProj_summed.shape[0]+1)
ky = np.arange(1,finalProj_summed.shape[1]+1)
# apply Fourier resolution
MultF_X = 1./(len(kx)*Res)
MultF_Y = 1./(len(ky)*Res)
# initialize q vectors
CentPos = np.round((np.array(finalProj_summed.shape)+1)/2.)
[KX, KY] = np.meshgrid((kx-CentPos[0])*MultF_X,(ky-CentPos[1])*MultF_Y)
KX = KX.T
KY = KY.T
q2 = KX**2 + KY**2
# obtain the tabulated electron scattering form factor based on the atomic number
fa78 = fatom_vector_python( np.sqrt(q2),78 )
fa26 = fatom_vector_python( np.sqrt(q2),26 )
# average the electron scattering factor
fixedfa = 0.5*(fa78+fa26)
    # loop over the different types of atoms
for j in range(len(Heights)):
# crop to the original size image for current atom type
CVol = finalProj_padded[(CropHalfWidth+1):(-1-CropHalfWidth),(CropHalfWidth+1):(-1-CropHalfWidth),j]
# FFT
FVol = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(CVol)))
# apply the electron scattering factor
FVol = FVol * fixedfa.reshape(sizeX)
        finalProj_summed = finalProj_summed + FVol
# obtain final projection by IFFT
Vol = np.real(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(finalProj_summed))))
return Vol
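# Minimal usage sketch for the projection generator above. The values below
# are arbitrary illustration inputs (a two-atom model with two atom types);
# real callers pass measured atomic models, refined H/B factors and the
# experimental pixel size.
# example_model = np.array([[0.0, 2.5], [0.0, -1.0], [0.0, 0.5]])  # 3 x N_atoms
# example_type  = np.array([[0, 1]])                               # type index per atom
# example_proj  = My_create_volProj_from_model_python(
#     example_model, example_type, np.array([1.0, 0.8]), np.array([5.0, 4.0]),
#     [64], 0.35, 10)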
def My_create_volProj_from_model_indivFA_python(model, atomtype, Heights, Bfactors, AtomicNumbers, volsize, Res, CropHalfWidth, zDir):
# rescale model based on pixel resolution
model = model / Res
# rescale peak heights and B factors
FHeights = Heights
FWidths = Bfactors / np.pi**2 / Res**2
# initialize xyz array for the volume
if len(volsize) == 3:
x = np.arange(volsize[0]) - np.round((volsize[0]+1)/2.) + 1
y = np.arange(volsize[1]) - np.round((volsize[1]+1)/2.) + 1
z = np.arange(volsize[2]) - np.round((volsize[2]+1)/2.) + 1
elif len(volsize) == 1:
x = np.arange(volsize[0]) - np.round((volsize[0]+1)/2.) + 1
y = x
z = x
else:
        raise ValueError('volsize should be either length 3 or length 1!')
# a variable for the projection size
sizeX = [len(x), len(y)]
    # determine which atoms fall inside the projection volume
inInd = np.logical_and(np.logical_and(np.logical_and(np.logical_and (np.logical_and(model[0,:] >= np.min(x) , model[0,:] <= np.max(x)) ,
model[1,:] >= np.min(y)), model[1,:] <= np.max(y) ),
model[2,:] >= np.min(z)), model[2,:] <= np.max(z))
# take only the atoms inside the projection
calcModel = model[:,inInd]
calcAtomtype = atomtype[:,inInd]
CropHalfWidth = int(CropHalfWidth)
# initialize projection array
finalProj_padded = np.zeros( (len(x) + (CropHalfWidth+1)*2, len(y) + (CropHalfWidth+1)*2, len(Heights)))
    # projection center position
cenPos = np.round((np.array(finalProj_padded.shape)+1)/2.)
# local cropping indices for every atom
cropIndRef = np.arange(-CropHalfWidth,CropHalfWidth+1)
# meshgrid indices for local cropping
[cropX,cropY] = np.meshgrid(cropIndRef,cropIndRef)
cropX = cropX.T
cropY = cropY.T
#loop over all atoms in the model
for i in range(calcModel.shape[1]):
# obtain local cropping indices for current atom
if zDir == 2:
currPos1 = calcModel[0:2,i] + cenPos[0:2]
currRndPos = np.round(currPos1)
elif zDir == 1:
currPos1 = calcModel[[2, 0],i] + cenPos[0:2]
currRndPos = np.round(currPos1)
cropInd1 = cropIndRef + currRndPos[0] -1
cropInd2 = cropIndRef + currRndPos[1] -1
# crop the local region for current atom
CropProj = finalProj_padded[np.ix_(list(cropInd1.astype(int)),list(cropInd2.astype(int)),list([calcAtomtype[0,i].astype(int)]))]
# sub-pixel position difference for current atom from the center pixel
        diffPos = currPos1 - currRndPos
if zDir == 2:
diffPosZ = calcModel[2,i] - np.round(calcModel[2,i])
elif zDir == 1:
diffPosZ = calcModel[1,i] - np.round(calcModel[1,i])
# calculate Gaussian profile based on the H and B factor
gaussCalc = (FHeights[calcAtomtype[0,i]]*np.exp( -1.*( (cropX-diffPos[0])**2 + (cropY-diffPos[1])**2 ) / FWidths[calcAtomtype[0,i]] )).reshape(CropProj.shape)
gaussZcalc = (np.exp(-1.*(cropIndRef - diffPosZ)**2 / FWidths[calcAtomtype[0,i]] ))
# update the local region in the projection
finalProj_padded[np.ix_(list(cropInd1.astype(int)),list(cropInd2.astype(int)),list([calcAtomtype[0,i].astype(int)]))] = CropProj + gaussCalc*np.sum(gaussZcalc)
# initialize final projection array
finalProj_summed = np.zeros( (len(x), len(y)) )
# initialize Fourier indices
kx = np.arange(1,finalProj_summed.shape[0]+1)
ky = np.arange(1,finalProj_summed.shape[1]+1)
# apply Fourier resolution
MultF_X = 1./(len(kx)*Res)
MultF_Y = 1./(len(ky)*Res)
# initialize q vectors
CentPos = np.round((np.array(finalProj_summed.shape)+1)/2.)
[KX, KY] = np.meshgrid((kx-CentPos[0])*MultF_X,(ky-CentPos[1])*MultF_Y)
KX = KX.T
KY = KY.T
q2 = KX**2 + KY**2
    # loop over the different types of atoms
for j in range(len(Heights)):
# crop to the original size image for current atom type
CVol = finalProj_padded[(CropHalfWidth+1):(-1-CropHalfWidth),(CropHalfWidth+1):(-1-CropHalfWidth),j]
# FFT
FVol = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(CVol)))
# obtain the tabulated electron scattering form factor based on the atomic number
currFA = fatom_vector_python( np.sqrt(q2),AtomicNumbers[j] )
# apply the electron scattering factor
FVol = FVol * currFA.reshape(sizeX)
        finalProj_summed = finalProj_summed + FVol
# obtain final projection by IFFT
Vol = np.real(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(finalProj_summed))))
return Vol
# function for calculating the electron scattering factor based on the tabulated value
def fatom_vector_python(q,Z):
# get tabulated value based on atomic number
fpara = fparameters_python(Z)
# prepare the calculation variable based on the tabulated value
a = np.array([fpara[1], fpara[3], fpara[5]])
b = np.array([fpara[2], fpara[4], fpara[6]])
c = np.array([fpara[7], fpara[9], fpara[11]])
d = np.array([fpara[8], fpara[10], fpara[12]])
    num = q.size
v = np.zeros((num))
q = q.reshape(num)
# calculate the electron scattering factor for each q vector
for hh in range(num):
        #% Lorentzians %
suml = np.sum( a/((q[hh]**2)+b) )
#% Gaussians %
sumg = np.sum( c*np.exp(-(q[hh]**2)*d) )
v[hh] = suml + sumg
return v
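# In formula form, the loop above evaluates a 3-Lorentzian + 3-Gaussian
# parameterization of the electron scattering factor,
#   f(q) = sum_i a_i / (q^2 + b_i)  +  sum_i c_i * exp(-d_i * q^2),
# with the (a_i, b_i, c_i, d_i) coefficients taken from the tabulated
# fparameters_python(Z).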
# function for calculating R factor with least-squares normalization
def calcR_norm_YY_python(data1,data2):
# reshape array
data1 = data1.reshape((data1.size))
data2 = data2.reshape((data2.size))
if len(data1)!=len(data2):
print('data length does not match!\n')
R = -1
else:
        # scale factor for least-squares normalization
lscale = np.dot(data1,data2)/np.linalg.norm(data2)**2
# normalize
data2 = data2 * lscale
# calculate R factor
R = np.sum(np.abs(np.abs(data1)-np.abs(data2)))/np.sum(np.abs(data1))
return R
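# Equivalently, with the least-squares scale s = <data1, data2> / ||data2||^2,
# the value returned above is
#   R = sum( | |data1| - |s * data2| | ) / sum( |data1| ).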
# function for obtaining rotation matrix based on quaternion algorithm
def MatrixQuaternionRot_python(vector,theta):
    theta = theta*np.pi/180.
    vector = vector / np.linalg.norm(vector)
    w = np.cos(theta/2.)
    x = -np.sin(theta/2.)*vector[0]
    y = -np.sin(theta/2.)*vector[1]
    z = -np.sin(theta/2.)*vector[2]
RotM = [[1-2*y**2-2*z**2 , 2*x*y+2*w*z, 2*x*z-2*w*y],
[2*x*y-2*w*z, 1-2*x**2-2*z**2, 2*y*z+2*w*x],
[2*x*z+2*w*y, 2*y*z-2*w*x, 1-2*x**2-2*y**2]]
dd = np.matrix(np.array(RotM))
return dd
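# Minimal self-test sketch for the two standalone helpers above; the inputs
# are arbitrary illustration values, and it assumes fparameters_python (used
# by fatom_vector_python) is defined earlier in this module.
if __name__ == "__main__":
    # rotate the x unit vector by 90 degrees about the z axis
    R = np.asarray(MatrixQuaternionRot_python(np.array([0., 0., 1.]), 90.))
    print(R.dot(np.array([1., 0., 0.])))
    # electron scattering factor of Pt (Z=78) at a few spatial frequencies
    print(fatom_vector_python(np.array([0.0, 0.1, 0.5, 1.0]), 78))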
|
OpenChemistry/materialsdatabank
|
materialsdatabank/r1/calc_R1_function_python_GEN.py
|
Python
|
bsd-3-clause
| 15,663
|
[
"Gaussian"
] |
a55af264c2cd3ba2773de5fff7acba6e98b216576b7641b62804a6df27e38988
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test neurom._neuritefunc functionality."""
from pathlib import Path
from math import pi, sqrt
from mock import patch, Mock
import numpy as np
from nose import tools as nt
from numpy.testing import assert_allclose
import scipy
import neurom as nm
from neurom.features import neuritefunc as _nf
from neurom.features import sectionfunc as sectionfunc
from neurom.geom import convex_hull
from neurom.features.tests.utils import _close
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
H5_PATH = DATA_PATH / 'h5/v1'
SWC_PATH = DATA_PATH / 'swc'
SIMPLE = nm.load_neuron(Path(SWC_PATH, 'simple.swc'))
NRN = nm.load_neuron(Path(H5_PATH, 'Neuron.h5'))
def test_principal_direction_extents():
principal_dir = list(_nf.principal_direction_extents(SIMPLE))
assert_allclose(principal_dir,
(14.736052694538641, 12.105102672688004))
# test with a realistic neuron
nrn = nm.load_neuron(Path(H5_PATH, 'bio_neuron-000.h5'))
p_ref = [1672.9694359427331, 142.43704397865031, 226.45895382204986,
415.50612748523838, 429.83008974193206, 165.95410536922873,
346.83281498399697]
p = _nf.principal_direction_extents(nrn)
_close(np.array(p), np.array(p_ref))
def test_n_bifurcation_points():
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites[0]), 1)
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites[1]), 1)
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites), 2)
def test_n_forking_points():
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites[0]), 1)
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites[1]), 1)
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites), 2)
def test_n_leaves():
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites[0]), 2)
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites[1]), 2)
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites), 4)
def test_total_area_per_neurite():
def surface(r0, r1, h):
return pi * (r0 + r1) * sqrt((r0 - r1) ** 2 + h ** 2)
basal_area = surface(1, 1, 5) + surface(1, 0, 5) + surface(1, 0, 6)
ret = _nf.total_area_per_neurite(SIMPLE,
neurite_type=nm.BASAL_DENDRITE)
nt.assert_almost_equal(ret[0], basal_area)
axon_area = surface(1, 1, 4) + surface(1, 0, 5) + surface(1, 0, 6)
ret = _nf.total_area_per_neurite(SIMPLE, neurite_type=nm.AXON)
nt.assert_almost_equal(ret[0], axon_area)
ret = _nf.total_area_per_neurite(SIMPLE)
nt.ok_(np.allclose(ret, [basal_area, axon_area]))
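# Worked example of the helper above: surface(r0, r1, h) is the lateral area
# of a truncated cone, so surface(1, 1, 5) = pi * 2 * sqrt(0 + 5**2) = 10*pi
# ~= 31.4159 -- the same value expected first in test_segment_areas below.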
def test_total_volume_per_neurite():
vol = _nf.total_volume_per_neurite(NRN)
nt.eq_(len(vol), 4)
# calculate the volumes by hand and compare
vol2 = [sum(sectionfunc.section_volume(s) for s in n.iter_sections())
for n in NRN.neurites
]
nt.eq_(vol, vol2)
# regression test
ref_vol = [271.94122143951864, 281.24754646913954,
274.98039928781355, 276.73860261723024]
nt.ok_(np.allclose(vol, ref_vol))
def test_neurite_volume_density():
vol = np.array(_nf.total_volume_per_neurite(NRN))
hull_vol = np.array([convex_hull(n).volume for n in nm.iter_neurites(NRN)])
vol_density = _nf.neurite_volume_density(NRN)
nt.eq_(len(vol_density), 4)
nt.ok_(np.allclose(vol_density, vol / hull_vol))
ref_density = [0.43756606998299519, 0.52464681266899216,
0.24068543213643726, 0.26289304906104355]
assert_allclose(vol_density, ref_density)
def test_neurite_volume_density_failed_convex_hull():
with patch('neurom.features.neuritefunc.convex_hull',
side_effect=scipy.spatial.qhull.QhullError('boom')):
vol_density = _nf.neurite_volume_density(NRN)
        nt.ok_(np.all(np.isnan(vol_density)))
def test_terminal_path_length_per_neurite():
terminal_distances = _nf.terminal_path_lengths_per_neurite(SIMPLE)
assert_allclose(terminal_distances,
(5 + 5., 5 + 6., 4. + 6., 4. + 5))
terminal_distances = _nf.terminal_path_lengths_per_neurite(SIMPLE,
neurite_type=nm.AXON)
assert_allclose(terminal_distances,
(4. + 6., 4. + 5.))
def test_total_length_per_neurite():
total_lengths = _nf.total_length_per_neurite(SIMPLE)
assert_allclose(total_lengths,
(5. + 5. + 6., 4. + 5. + 6.))
def test_n_segments():
n_segments = _nf.n_segments(SIMPLE)
nt.eq_(n_segments, 6)
def test_n_neurites():
n_neurites = _nf.n_neurites(SIMPLE)
nt.eq_(n_neurites, 2)
def test_n_sections():
n_sections = _nf.n_sections(SIMPLE)
nt.eq_(n_sections, 6)
def test_neurite_volumes():
# note: cannot use SIMPLE since it lies in a plane
total_volumes = _nf.total_volume_per_neurite(NRN)
assert_allclose(total_volumes,
[271.94122143951864, 281.24754646913954,
274.98039928781355, 276.73860261723024]
)
def test_section_path_lengths():
path_lengths = list(_nf.section_path_lengths(SIMPLE))
assert_allclose(path_lengths,
(5., 10., 11., # type 3, basal dendrite
4., 10., 9.)) # type 2, axon
def test_section_term_lengths():
term_lengths = list(_nf.section_term_lengths(SIMPLE))
assert_allclose(term_lengths,
(5., 6., 6., 5.))
def test_section_bif_lengths():
bif_lengths = list(_nf.section_bif_lengths(SIMPLE))
assert_allclose(bif_lengths,
(5., 4.))
def test_section_end_distances():
end_dist = list(_nf.section_end_distances(SIMPLE))
assert_allclose(end_dist,
[5.0, 5.0, 6.0, 4.0, 6.0, 5.0])
def test_section_partition_pairs():
part_pairs = list(_nf.partition_pairs(SIMPLE))
assert_allclose(part_pairs,
[(1.0, 1.0), (1.0, 1.0)])
def test_section_bif_radial_distances():
bif_rads = list(_nf.section_bif_radial_distances(SIMPLE))
assert_allclose(bif_rads,
[5., 4.])
trm_rads = list(_nf.section_bif_radial_distances(NRN, neurite_type=nm.AXON))
assert_allclose(trm_rads,
[8.842008561870646,
16.7440421479104,
23.070306480850533,
30.181121708042546,
36.62766031035137,
43.967487830324885,
51.91971040624528,
59.427722328770955,
66.25222507299583,
74.05119754074926])
def test_section_term_radial_distances():
trm_rads = list(_nf.section_term_radial_distances(SIMPLE))
assert_allclose(trm_rads,
[7.0710678118654755, 7.810249675906654, 7.211102550927978, 6.4031242374328485])
trm_rads = list(_nf.section_term_radial_distances(NRN, neurite_type=nm.APICAL_DENDRITE))
assert_allclose(trm_rads,
[16.22099879395879,
25.992977561564082,
33.31600613822663,
42.721314797308175,
52.379508081911546,
59.44327819128149,
67.07832724133213,
79.97743930553612,
87.10434825508366,
97.25246040544428,
99.58945832481642])
def test_number_of_sections_per_neurite():
sections = _nf.number_of_sections_per_neurite(SIMPLE)
assert_allclose(sections,
(3, 3))
def test_section_branch_orders():
branch_orders = list(_nf.section_branch_orders(SIMPLE))
assert_allclose(branch_orders,
(0, 1, 1, # type 3, basal dendrite
0, 1, 1)) # type 2, axon
def test_section_bif_branch_orders():
bif_branch_orders = list(_nf.section_bif_branch_orders(SIMPLE))
assert_allclose(bif_branch_orders,
(0, # type 3, basal dendrite
0)) # type 2, axon
def test_section_term_branch_orders():
term_branch_orders = list(_nf.section_term_branch_orders(SIMPLE))
assert_allclose(term_branch_orders,
(1, 1, # type 3, basal dendrite
1, 1)) # type 2, axon
def test_section_radial_distances():
radial_distances = _nf.section_radial_distances(SIMPLE)
assert_allclose(radial_distances,
(5.0, sqrt(5**2 + 5**2), sqrt(6**2 + 5**2), # type 3, basal dendrite
4.0, sqrt(6**2 + 4**2), sqrt(5**2 + 4**2))) # type 2, axon
def test_local_bifurcation_angles():
local_bif_angles = list(_nf.local_bifurcation_angles(SIMPLE))
assert_allclose(local_bif_angles,
(pi, pi))
def test_remote_bifurcation_angles():
remote_bif_angles = list(_nf.remote_bifurcation_angles(SIMPLE))
assert_allclose(remote_bif_angles,
(pi, pi))
def test_partition():
partition = list(_nf.bifurcation_partitions(SIMPLE))
assert_allclose(partition,
(1.0, 1.0))
def test_partition_asymmetry():
partition = list(_nf.partition_asymmetries(SIMPLE))
assert_allclose(partition,
(0.0, 0.0))
partition = list(_nf.partition_asymmetries(SIMPLE, variant='length'))
assert_allclose(partition,
(0.0625, 0.06666666666666667))
nt.assert_raises(ValueError, _nf.partition_asymmetries, SIMPLE, variant='unvalid-variant')
def test_segment_lengths():
segment_lengths = _nf.segment_lengths(SIMPLE)
assert_allclose(segment_lengths,
(5.0, 5.0, 6.0, # type 3, basal dendrite
4.0, 6.0, 5.0)) # type 2, axon
def test_segment_areas():
result = _nf.segment_areas(SIMPLE)
assert_allclose(result,
[31.415927,
16.019042,
19.109562,
25.132741,
19.109562,
16.019042])
def test_segment_volumes():
expected = [
15.70796327,
5.23598776,
6.28318531,
12.56637061,
6.28318531,
5.23598776,
]
result = _nf.segment_volumes(SIMPLE)
assert_allclose(result, expected)
def test_segment_midpoints():
midpoints = np.array(_nf.segment_midpoints(SIMPLE))
assert_allclose(midpoints,
np.array([[0., (5. + 0) / 2, 0.], # trunk type 2
[-2.5, 5., 0.],
[3., 5., 0.],
[0., (-4. + 0) / 2., 0.], # trunk type 3
[3., -4., 0.],
[-2.5, -4., 0.]]))
def test_segment_radial_distances():
"""midpoints on segments."""
radial_distances = _nf.segment_radial_distances(SIMPLE)
assert_allclose(radial_distances,
[2.5, sqrt(2.5**2 + 5**2), sqrt(3**2 + 5**2), 2.0, 5.0, sqrt(2.5**2 + 4**2)])
def test_segment_path_lengths():
pathlengths = _nf.segment_path_lengths(SIMPLE)
assert_allclose(pathlengths, [5., 10., 11., 4., 10., 9.])
pathlengths = _nf.segment_path_lengths(NRN)[:5]
assert_allclose(pathlengths, [0.1, 1.332525, 2.530149, 3.267878, 4.471462])
def test_section_taper_rates():
assert_allclose(list(_nf.section_taper_rates(NRN.neurites[0]))[:10],
[0.06776235492169848,
0.0588716599404923,
0.03791571485186163,
0.04674653812192691,
-0.026399800285566058,
-0.026547582897720887,
-0.045038414440432537,
0.02083822978267914,
-0.0027721371791201038,
0.0803069042861474],
atol=1e-4)
|
wizmer/NeuroM
|
neurom/features/tests/test_neuritefunc.py
|
Python
|
bsd-3-clause
| 13,695
|
[
"NEURON"
] |
cce9861850de4f6a40aad8310aa0794a08fb020518abd1686e979752c4b9bd75
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import os
import pathlib
import sys
import wx
try:
import wx.lib.agw.hyperlink as hl
except ImportError:
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from invesalius.pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.gui.dialogs as dlg
import invesalius.project as proj
import invesalius.session as ses
from invesalius import inv_paths
BTN_MASK = wx.NewId()
BTN_PICTURE = wx.NewId()
BTN_SURFACE = wx.NewId()
BTN_REPORT = wx.NewId()
BTN_REQUEST_RP = wx.NewId()
WILDCARD_SAVE_3D = "Inventor (*.iv)|*.iv|"\
"PLY (*.ply)|*.ply|"\
"Renderman (*.rib)|*.rib|"\
"STL (*.stl)|*.stl|"\
"STL ASCII (*.stl)|*.stl|"\
"VRML (*.vrml)|*.vrml|"\
"VTK PolyData (*.vtp)|*.vtp|"\
"Wavefront (*.obj)|*.obj|"\
"X3D (*.x3d)|*.x3d"
INDEX_TO_TYPE_3D = {0: const.FILETYPE_IV,
1: const.FILETYPE_PLY,
2: const.FILETYPE_RIB,
3: const.FILETYPE_STL,
4: const.FILETYPE_STL_ASCII,
5: const.FILETYPE_VRML,
6: const.FILETYPE_VTP,
7: const.FILETYPE_OBJ,
8: const.FILETYPE_X3D}
INDEX_TO_EXTENSION = {0: "iv",
1: "ply",
2: "rib",
3: "stl",
4: "stl",
5: "vrml",
6: "vtp",
7: "obj",
8: "x3d"}
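# Example of how the two tables above are used together in OnLinkExportSurface
# below (values shown for the default STL entry):
# filetype_index = dlg.GetFilterIndex()            # 3 when "STL" is selected
# filetype = INDEX_TO_TYPE_3D[filetype_index]      # -> const.FILETYPE_STL
# extension = INDEX_TO_EXTENSION[filetype_index]   # -> "stl"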
WILDCARD_SAVE_2D = "BMP (*.bmp)|*.bmp|"\
"JPEG (*.jpg)|*.jpg|"\
"PNG (*.png)|*.png|"\
"PostScript (*.ps)|*.ps|"\
"Povray (*.pov)|*.pov|"\
"TIFF (*.tiff)|*.tiff"
INDEX_TO_TYPE_2D = {0: const.FILETYPE_BMP,
1: const.FILETYPE_JPG,
2: const.FILETYPE_PNG,
3: const.FILETYPE_PS,
4: const.FILETYPE_POV,
5: const.FILETYPE_OBJ}
WILDCARD_SAVE_MASK = "VTK ImageData (*.vti)|*.vti"
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
        background_colour = wx.Colour(255,255,255)
        self.SetBackgroundColour(background_colour)
self.SetAutoLayout(1)
# Counter for projects loaded in current GUI
# Fixed hyperlink items
tooltip = wx.ToolTip(_("Export InVesalius screen to an image file"))
link_export_picture = hl.HyperLinkCtrl(self, -1,
_("Export picture..."))
link_export_picture.SetUnderlines(False, False, False)
link_export_picture.SetBold(True)
link_export_picture.SetColours("BLACK", "BLACK", "BLACK")
link_export_picture.SetBackgroundColour(self.GetBackgroundColour())
link_export_picture.SetToolTip(tooltip)
link_export_picture.AutoBrowse(False)
link_export_picture.UpdateLink()
link_export_picture.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportPicture)
tooltip = wx.ToolTip(_("Export 3D surface"))
link_export_surface = hl.HyperLinkCtrl(self, -1,_("Export 3D surface..."))
link_export_surface.SetUnderlines(False, False, False)
link_export_surface.SetBold(True)
link_export_surface.SetColours("BLACK", "BLACK", "BLACK")
link_export_surface.SetBackgroundColour(self.GetBackgroundColour())
link_export_surface.SetToolTip(tooltip)
link_export_surface.AutoBrowse(False)
link_export_surface.UpdateLink()
link_export_surface.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportSurface)
#tooltip = wx.ToolTip(_("Export 3D mask (voxels)"))
#link_export_mask = hl.HyperLinkCtrl(self, -1,_("Export mask..."))
#link_export_mask.SetUnderlines(False, False, False)
#link_export_mask.SetColours("BLACK", "BLACK", "BLACK")
#link_export_mask.SetToolTip(tooltip)
#link_export_mask.AutoBrowse(False)
#link_export_mask.UpdateLink()
#link_export_mask.Bind(hl.EVT_HYPERLINK_LEFT,
# self.OnLinkExportMask)
#tooltip = wx.ToolTip("Request rapid prototyping services")
#link_request_rp = hl.HyperLinkCtrl(self,-1,"Request rapid prototyping...")
#link_request_rp.SetUnderlines(False, False, False)
#link_request_rp.SetColours("BLACK", "BLACK", "BLACK")
#link_request_rp.SetToolTip(tooltip)
#link_request_rp.AutoBrowse(False)
#link_request_rp.UpdateLink()
#link_request_rp.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkRequestRP)
#tooltip = wx.ToolTip("Open report tool...")
#link_report = hl.HyperLinkCtrl(self,-1,"Open report tool...")
#link_report.SetUnderlines(False, False, False)
#link_report.SetColours("BLACK", "BLACK", "BLACK")
#link_report.SetToolTip(tooltip)
#link_report.AutoBrowse(False)
#link_report.UpdateLink()
#link_report.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkReport)
# Image(s) for buttons
if sys.platform == 'darwin':
BMP_EXPORT_SURFACE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "surface_export_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "tool_photo_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask.png",
# wx.BITMAP_TYPE_PNG)
else:
BMP_EXPORT_SURFACE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "surface_export.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "tool_photo.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask_small.png",
# wx.BITMAP_TYPE_PNG)
# Buttons related to hyperlinks
button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
button_picture = pbtn.PlateButton(self, BTN_PICTURE, "",
BMP_TAKE_PICTURE,
style=button_style)
button_picture.SetBackgroundColour(self.GetBackgroundColour())
self.button_picture = button_picture
button_surface = pbtn.PlateButton(self, BTN_SURFACE, "",
BMP_EXPORT_SURFACE,
style=button_style)
button_surface.SetBackgroundColour(self.GetBackgroundColour())
#button_mask = pbtn.PlateButton(self, BTN_MASK, "",
# BMP_EXPORT_MASK,
# style=button_style)
#button_request_rp = pbtn.PlateButton(self, BTN_REQUEST_RP, "",
# BMP_IMPORT, style=button_style)
#button_report = pbtn.PlateButton(self, BTN_REPORT, "",
# BMP_IMPORT,
# style=button_style)
        # When using PlateButton, it is necessary to bind events from the parent window
self.Bind(wx.EVT_BUTTON, self.OnButton)
# Tags and grid sizer for fixed items
flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
flag_button = wx.EXPAND | wx.GROW
fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=2, vgap=0)
fixed_sizer.AddGrowableCol(0, 1)
fixed_sizer.AddMany([ (link_export_picture, 1, flag_link, 3),
(button_picture, 0, flag_button),
(link_export_surface, 1, flag_link, 3),
(button_surface, 0, flag_button),])
#(link_export_mask, 1, flag_link, 3),
#(button_mask, 0, flag_button)])
#(link_report, 0, flag_link, 3),
#(button_report, 0, flag_button),
#(link_request_rp, 1, flag_link, 3),
#(button_request_rp, 0, flag_button)])
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(fixed_sizer, 0, wx.GROW|wx.EXPAND)
# Update main sizer and panel layout
self.SetSizer(main_sizer)
self.Fit()
self.sizer = main_sizer
self.__init_menu()
def __init_menu(self):
menu = wx.Menu()
self.id_to_name = {const.AXIAL:_("Axial slice"),
const.CORONAL:_("Coronal slice"),
const.SAGITAL:_("Sagittal slice"),
const.VOLUME:_("Volume")}
for id in self.id_to_name:
item = wx.MenuItem(menu, id, self.id_to_name[id])
menu.Append(item)
self.menu_picture = menu
menu.Bind(wx.EVT_MENU, self.OnMenuPicture)
def OnMenuPicture(self, evt):
id = evt.GetId()
value = dlg.ExportPicture(self.id_to_name[id])
if value:
filename, filetype = value
Publisher.sendMessage('Export picture to file',
orientation=id, filename=filename, filetype=filetype)
def OnLinkExportPicture(self, evt=None):
self.button_picture.PopupMenu(self.menu_picture)
def OnLinkExportMask(self, evt=None):
project = proj.Project()
if sys.platform == 'win32':
project_name = project.name
else:
project_name = project.name+".vti"
dlg = wx.FileDialog(None,
"Save mask as...", # title
"", # last used directory
project_name, # filename
WILDCARD_SAVE_MASK,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(0) # default is VTI
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
extension = "vti"
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
filetype = const.FILETYPE_IMAGEDATA
Publisher.sendMessage('Export mask to file',
filename=filename,
filetype=filetype)
def OnLinkExportSurface(self, evt=None):
"OnLinkExportSurface"
project = proj.Project()
n_surface = 0
for index in project.surface_dict:
if project.surface_dict[index].is_shown:
n_surface += 1
if n_surface:
if sys.platform == 'win32':
project_name = pathlib.Path(project.name).stem
else:
project_name = pathlib.Path(project.name).stem + ".stl"
session = ses.Session()
last_directory = session.get('paths', 'last_directory_3d_surface', '')
dlg = wx.FileDialog(None,
_("Save 3D surface as..."), # title
last_directory, # last used directory
project_name, # filename
WILDCARD_SAVE_3D,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(3) # default is STL
if dlg.ShowModal() == wx.ID_OK:
filetype_index = dlg.GetFilterIndex()
filetype = INDEX_TO_TYPE_3D[filetype_index]
filename = dlg.GetPath()
extension = INDEX_TO_EXTENSION[filetype_index]
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
if filename:
session['paths']['last_directory_3d_surface'] = os.path.split(filename)[0]
session.WriteSessionFile()
Publisher.sendMessage('Export surface to file',
filename=filename, filetype=filetype)
else:
dlg = wx.MessageDialog(None,
_("You need to create a surface and make it ") +
_("visible before exporting it."),
'InVesalius 3',
wx.OK | wx.ICON_INFORMATION)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnLinkRequestRP(self, evt=None):
pass
def OnLinkReport(self, evt=None):
pass
def OnButton(self, evt):
id = evt.GetId()
if id == BTN_PICTURE:
self.OnLinkExportPicture()
elif id == BTN_SURFACE:
self.OnLinkExportSurface()
elif id == BTN_REPORT:
self.OnLinkReport()
elif id == BTN_REQUEST_RP:
self.OnLinkRequestRP()
else:# id == BTN_MASK:
self.OnLinkExportMask()
|
paulojamorim/invesalius3
|
invesalius/gui/task_exporter.py
|
Python
|
gpl-2.0
| 15,185
|
[
"VTK"
] |
25a971f96db04cfb7147ae7025b61be257b12e7582d86b0920f7051d6b848711
|
# vim: set expandtab shiftwidth=2 softtabstop=2:
# difmap
# "genmap" - generate a new map model
#
from TEMPy.MapParser import MapParser
from TEMPy.ScoringFunctions import ScoringFunctions
from TEMPy.StructureParser import PDBParser
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.class_arg import TempyParser
from traceback import print_exc
import os,sys
import numpy as np
from chimerax.core.map.volume import Volume
from chimerax.core.atomic.structure import AtomicStructure
from .util import chimera_to_tempy_map, tempy_to_chimera_map
from . import tool_select
from . import tool_layout
def tool_genmap(toptool):
""" Called from the GUI. """
# Check contours
contour1 = None
contour2 = None
  if toptool._widget_c1_dif.isEnabled() and toptool._widget_c2_dif.isEnabled():
try:
contour1 = float(toptool._widget_c1_dif.text())
contour2 = float(toptool._widget_c2_dif.text())
except:
toptool._show_error("Check the values fo contour1 and contour2")
return
# Find models
result, map0, map1 = tool_select.select_two(toptool)
if result:
if isinstance(map0, Volume) and isinstance(map1, Volume):
try:
rez1 = float(toptool._widget_rez1_dif.text())
rez2 = float(toptool._widget_rez2_dif.text())
genmap(toptool.session,map0,map1,rez1,rez2,contour1,contour2)
except:
toptool._show_error("Check the values for rez1 and rez2")
return
else :
toptool._show_error("Please select two maps.")
return
def map_contour(m,t=-1.):
c1 = None
if t != -1.0:
zeropeak,ave,sigma1 = m._peak_density()
if not zeropeak is None: c1 = zeropeak+(t*sigma1)
else:
c1 = 0.0
return c1
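# Hedged usage sketch: map_contour picks a display threshold at
# zeropeak + t*sigma of the map's density histogram (and 0.0 when t == -1.0).
# Given any TEMPy map m, e.g. one produced by chimera_to_tempy_map above:
# c = map_contour(m, t=1.5)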
def genmap(session, map0 = None, map1 = None, rez1 = None, rez2 = None, c1 = None, c2 = None):
""" Generate our new map."""
m0 = chimera_to_tempy_map(map0)
m1 = chimera_to_tempy_map(map1)
# What do we do with the contours? We may already have them?
# TODO - pull contours from m0,m1
#MAIN CALCULATION
#whether to shift density to positive values
  if c1 is None:
c1 = map_contour(m0,t=1.5)
  if c2 is None:
c2 = map_contour(m1,t=1.5)
c1 = (c1 - m0.min())
c2 = (c2 - m1.min())
m0.fullMap = (m0.fullMap - m0.min())
m1.fullMap = (m1.fullMap - m1.min())
#find a common box to hold both maps
spacing = max(m0.apix,m1.apix)
grid_shape, new_ori = m0._alignment_box(m1,spacing)
emmap_1 = m0.copy()
emmap_2 = m1.copy()
#resample scaled maps to the common grid
spacing = max(rez1,rez2)*0.33
# Not sure we should do scaling here?
sc = ScoringFunctions()
emmap_1.fullMap,emmap_2.fullMap = sc._amplitude_match(m0,m1,0,0,0.02,0,0,max(rez1,rez2),lpfiltb=True,lpfilta=False,ref=False)
apix_ratio = emmap_1.apix/spacing
diff1 = emmap_1._interpolate_to_grid(grid_shape,spacing,new_ori,1)
diff2 = emmap_2._interpolate_to_grid(grid_shape,spacing,new_ori,1)
# get mask inside contour for the initial maps
emmap_1.fullMap = (m0.fullMap>c1)*1.0
emmap_2.fullMap = (m1.fullMap>c2)*1.0
#interpolate masks into common grid
mask1 = emmap_1._interpolate_to_grid(grid_shape,spacing,new_ori,1,'zero')
mask2 = emmap_2._interpolate_to_grid(grid_shape,spacing,new_ori,1,'zero')
mask1.fullMap = mask1.fullMap > 0.1
mask2.fullMap = mask2.fullMap > 0.1
#min of minimums in the two scaled maps
min1 = diff1.min()
min2 = diff2.min()
min_scaled_maps = min(min1,min2)
#shift to positive values
diff1.fullMap = diff1.fullMap - min_scaled_maps
diff2.fullMap = diff2.fullMap - min_scaled_maps
#range of values in the scaled maps
min1 = np.amin(diff1.fullMap[mask1.fullMap])
diffc1 = min1+0.10*(np.amax(diff1.fullMap)-min1)
min2 = np.amin(diff2.fullMap[mask2.fullMap])
diffc2 = min2+0.10*(np.amax(diff2.fullMap)-min2)
  #keep a copy of diff1 to compute the reverse difference below
diff_map = diff1.copy()
#calculate difference
diff1.fullMap = (diff1.fullMap - diff2.fullMap)
diff2.fullMap = (diff2.fullMap - diff_map.fullMap)
diff1.fullMap = diff1.fullMap*(mask1.fullMap)
diff2.fullMap = diff2.fullMap*(mask2.fullMap)
#interpolate back to original grids
#mask1 = diff1._interpolate_to_grid1(m0.fullMap.shape,m0.apix,m0.origin,1,'zero')
mask1 = diff1._interpolate_to_grid(m0.fullMap.shape,m0.apix,m0.origin,1,'zero')
mask2 = diff2._interpolate_to_grid(m1.fullMap.shape,m1.apix,m1.origin,1,'zero')
# for assigning differences (see below), use positive differences
mask1.fullMap = mask1.fullMap*(mask1.fullMap>0.)
mask2.fullMap = mask2.fullMap*(mask2.fullMap>0.)
nm0 = tempy_to_chimera_map(session, mask1)
nm1 = tempy_to_chimera_map(session, mask2)
session.models.add([nm0,nm1])
|
OniDaito/ChimeraXTempy
|
src/difmap.py
|
Python
|
mit
| 4,685
|
[
"ChimeraX"
] |
6677ba5aae17d597ec350413e85da0bd6b694dacb137c22c6480cdc213585aaa
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
import unittest
from pyemma.util.files import TemporaryDirectory
from logging import getLogger
from six.moves import range
import numpy as np
import pyemma.coordinates as coor
import pyemma.util.types as types
logger = getLogger('pyemma.'+'TestCluster')
class TestClusterAssign(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestClusterAssign, cls).setUpClass()
# generate Gaussian mixture
means = [np.array([-3,0]),
np.array([-1,1]),
np.array([0,0]),
np.array([1,-1]),
np.array([4,2])]
widths = [np.array([0.1,0.1]),
np.array([0.1,0.1]),
np.array([0.1,0.1]),
np.array([0.1,0.1]),
np.array([0.1,0.1])]
# data
cls.nsample = 1000
cls.T = len(means)*cls.nsample
cls.X = np.zeros((cls.T, 2))
for i in range(len(means)):
            cls.X[i*cls.nsample:(i+1)*cls.nsample,0] = widths[i][0] * np.random.randn(cls.nsample) + means[i][0]
            cls.X[i*cls.nsample:(i+1)*cls.nsample,1] = widths[i][1] * np.random.randn(cls.nsample) + means[i][1]
# try assigning actual centers:
cls.centers = np.array([[-3,0],
[-1,1],
[0,0],
[1,-1],
[4,2]])
# assignment
cls.ass = coor.assign_to_centers(data = cls.X, centers=cls.centers, return_dtrajs=False, n_jobs=1)
def test_chunksize(self):
assert types.is_int(self.ass.chunksize)
def test_clustercenters(self):
c = self.ass
assert c.clustercenters.shape[0] == self.centers.shape[0]
assert c.clustercenters.shape[1] == 2
def test_data_producer(self):
c = self.ass
assert c.data_producer is not None
def test_describe(self):
c = self.ass
desc = c.describe()
assert types.is_string(desc) or types.is_list_of_string(desc)
def test_dimension(self):
c = self.ass
assert types.is_int(c.dimension())
assert c.dimension() == 1
def test_dtrajs(self):
c = self.ass
assert len(c.dtrajs) == 1
assert c.dtrajs[0].dtype == c.output_type()
assert len(c.dtrajs[0]) == self.T
# assignment in this case should be perfect
for i in range(self.T):
assert c.dtrajs[0][i] == int(i / self.nsample)
def test_return_dtrajs(self):
dtrajs = coor.assign_to_centers(data=self.X, centers=self.centers)
for dtraj in dtrajs:
assert types.is_int_vector(dtraj)
def test_get_output(self):
c = self.ass
O = c.get_output()
assert types.is_list(O)
assert len(O) == 1
assert types.is_int_matrix(O[0])
assert O[0].shape[0] == self.T
assert O[0].shape[1] == 1
def test_in_memory(self):
c = self.ass
assert isinstance(c.in_memory, bool)
def test_iterator(self):
c = self.ass
for itraj, chunk in c:
assert types.is_int(itraj)
assert types.is_int_matrix(chunk)
assert chunk.shape[0] <= c.chunksize or c.chunksize == 0
assert chunk.shape[1] == c.dimension()
def test_map(self):
c = self.ass
Y = c.transform(self.X)
assert Y.shape[0] == self.T
assert Y.shape[1] == 1
# test if consistent with get_output
assert np.allclose(Y, c.get_output()[0])
def test_n_frames_total(self):
c = self.ass
        assert c.n_frames_total() == self.T
def test_number_of_trajectories(self):
c = self.ass
        assert c.number_of_trajectories() == 1
def test_output_type(self):
c = self.ass
assert c.output_type() == np.int32
def test_parametrize(self):
c = self.ass
# nothing should happen
c.parametrize()
def test_save_dtrajs(self):
c = self.ass
prefix = "test"
extension = ".dtraj"
with TemporaryDirectory() as outdir:
c.save_dtrajs(trajfiles=None, prefix=prefix, output_dir=outdir, extension=extension)
names = ["%s_%i%s" % (prefix, i, extension)
for i in range(c.data_producer.number_of_trajectories())]
names = [os.path.join(outdir, n) for n in names]
# check files with given patterns are there
for f in names:
os.stat(f)
def test_trajectory_length(self):
c = self.ass
assert c.trajectory_length(0) == self.T
with self.assertRaises(IndexError):
c.trajectory_length(1)
def test_trajectory_lengths(self):
c = self.ass
assert len(c.trajectory_lengths()) == 1
assert c.trajectory_lengths()[0] == c.trajectory_length(0)
def test_wrong_centers_argument(self):
dim = 3
data = np.empty((100,dim))
centers = np.empty((5, dim+1))
with self.assertRaises(ValueError):
c = coor.assign_to_centers(data, centers)
def test_wrong_centers_argument2(self):
dim = 3
data = np.empty((100,dim))
centers = np.empty(1)
with self.assertRaises(ValueError):
c = coor.assign_to_centers(data, centers)
def test_threads_env_num_threads_fixed(self):
import os
old_val = os.getenv('OMP_NUM_THREADS', '')
os.environ['OMP_NUM_THREADS'] = '4'
desired_n_jobs=2
try:
assert os.environ['OMP_NUM_THREADS'] == "4"
X = np.random.random((1000, 3))
centers = X[np.random.choice(1000, 10)]
res = coor.assign_to_centers(X, centers, n_jobs=desired_n_jobs, return_dtrajs=False)
self.assertEqual(res.n_jobs, desired_n_jobs)
finally:
del os.environ['OMP_NUM_THREADS']
def test_threads_env_num_threads_fixed_def_arg(self):
import os
desired_n_jobs = 3
os.environ['OMP_NUM_THREADS'] = str(desired_n_jobs)
try:
assert os.environ['OMP_NUM_THREADS'] == str(desired_n_jobs)
X = np.random.random((1000, 3))
centers = X[np.random.choice(1000, 10)]
# note: we want another job number here, but it will be ignored!
res = coor.assign_to_centers(X, centers, n_jobs=None, return_dtrajs=False)
self.assertEqual(res.n_jobs, desired_n_jobs)
finally:
del os.environ['OMP_NUM_THREADS']
def test_threads_omp_env_arg_borked(self):
import os
os.environ['OMP_NUM_THREADS'] = 'this is not right'
try:
import psutil
X = np.random.random((1000, 3))
centers = X[np.random.choice(1000, 10)]
# note: we want another job number here, but it will be ignored!
res = coor.assign_to_centers(X, centers, n_jobs=None, return_dtrajs=False)
self.assertEqual(res.n_jobs, psutil.cpu_count())
finally:
del os.environ['OMP_NUM_THREADS']
def test_threads_cpu_count_def_arg(self):
import psutil
X = np.random.random((1000, 3))
centers = X[np.random.choice(1000, 10)]
# note: we want another job number here, but it will be ignored!
res = coor.assign_to_centers(X, centers, return_dtrajs=False)
self.assertEqual(res.n_jobs, psutil.cpu_count())
def test_assignment_multithread(self):
# re-do assignment with multiple threads and compare results
n = 10000
dim = 100
chunksize=1000
X = np.random.random((n, dim))
centers = X[np.random.choice(n, dim)]
assignment_mp = coor.assign_to_centers(X, centers, n_jobs=4, chunk_size=chunksize)
assignment_sp = coor.assign_to_centers(X, centers, n_jobs=1, chunk_size=chunksize)
np.testing.assert_equal(assignment_mp, assignment_sp)
def test_assignment_multithread_minrsmd(self):
# re-do assignment with multiple threads and compare results
n = 10000
dim = 100
chunksize = 1000
X = np.random.random((n, dim))
centers = X[np.random.choice(n, dim)]
assignment_mp = coor.assign_to_centers(X, centers, n_jobs=4, chunk_size=chunksize, metric='minRMSD')
assignment_sp = coor.assign_to_centers(X, centers, n_jobs=1, chunk_size=chunksize, metric='minRMSD')
np.testing.assert_equal(assignment_mp, assignment_sp)
def test_min_rmsd(self):
import pyemma.datasets as data
d = data.get_bpti_test_data()
reader = coor.source(d['trajs'], top=d['top'])
N_centers = 9
centers = np.asarray((reader.ra_itraj_jagged[0, [0, 1, 7]],
reader.ra_itraj_jagged[1, [32, 1, 23]],
reader.ra_itraj_jagged[2, [17, 8, 15]])
).reshape((N_centers, -1))
dtraj = coor.assign_to_centers(reader, centers=centers, metric='minRMSD', return_dtrajs=True)
num_assigned_states = len(np.unique(np.concatenate(dtraj)))
self.assertEqual(num_assigned_states, N_centers,
"assigned states=%s out of %s possible ones."
% (num_assigned_states, N_centers))
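# Minimal usage sketch of the API exercised by the tests above, with made-up
# data shapes:
# X = np.random.random((1000, 2))              # observations
# centers = X[np.random.choice(1000, 5)]       # pick 5 of them as centers
# dtrajs = coor.assign_to_centers(X, centers)  # list with one int vector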
if __name__ == "__main__":
unittest.main()
|
gph82/PyEMMA
|
pyemma/coordinates/tests/test_assign.py
|
Python
|
lgpl-3.0
| 10,159
|
[
"Gaussian"
] |
02daaa38178e82cdc85f44f89c15c54d7c93c6bfbb20933c67d9d1bd72c1ffb1
|
# -*- coding: utf-8 -*-
import itertools
import os
import re
import urllib
import logging
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from modularodm.exceptions import NoResultsFound
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project import signals as project_signals
logger = logging.getLogger(__name__)
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
    :param Auth auth: Auth object that may carry the key of a view-only link
:return bool anonymous: Whether the node is anonymous to the user or not
"""
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
if not view_only_link:
return False
if node.is_public:
return False
return any(
link.anonymous
for link in node.private_links_active
if link.key == view_only_link
)
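# Hedged usage sketch (names are illustrative): a view typically combines this
# check with privacy_info_handle to scrub contributor names, e.g.
# anonymous = has_anonymous_link(node, auth)
# name = privacy_info_handle(node.creator.fullname, anonymous, name=True)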
class MetaSchema(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
name = fields.StringField()
schema = fields.DictionaryField()
category = fields.StringField()
# Version of the Knockout metadata renderer to use (e.g. if data binds
# change)
metadata_version = fields.IntegerField()
# Version of the schema to use (e.g. if questions, responses change)
schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
"""Import meta-data schemas from JSON to database, optionally clearing
database first.
:param clear: Clear schema database before import
"""
if clear:
try:
MetaSchema.remove()
except AttributeError:
if not settings.DEBUG_MODE:
raise
for schema in OSF_META_SCHEMAS:
try:
MetaSchema.find_one(
Q('name', 'eq', schema['name']) &
Q('schema_version', 'eq', schema['schema_version'])
)
except:
schema['name'] = schema['name'].replace(' ', '_')
schema_obj = MetaSchema(**schema)
schema_obj.save()
class MetaData(GuidStoredObject):
_id = fields.StringField(primary=True)
target = fields.AbstractForeignField(backref='metadata')
data = fields.DictionaryField()
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
for key, val in value.iteritems():
if not User.load(key):
raise ValidationValueError('Keys must be user IDs')
if not isinstance(val, dict):
raise ValidationTypeError('Values must be dictionaries')
if 'category' not in val or 'text' not in val:
raise ValidationValueError(
'Values must include `category` and `text` keys'
)
class Comment(GuidStoredObject):
_id = fields.StringField(primary=True)
user = fields.ForeignField('user', required=True, backref='commented')
node = fields.ForeignField('node', required=True, backref='comment_owner')
target = fields.AbstractForeignField(required=True, backref='commented')
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
modified = fields.BooleanField()
is_deleted = fields.BooleanField(default=False)
content = fields.StringField()
# Dictionary field mapping user IDs to dictionaries of report details:
# {
# 'icpnw': {'category': 'hate', 'message': 'offensive'},
# 'cdi38': {'category': 'spam', 'message': 'godwins law'},
# }
reports = fields.DictionaryField(validate=validate_comment_reports)
@classmethod
def create(cls, auth, **kwargs):
comment = cls(**kwargs)
comment.save()
comment.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': comment.node.parent_id,
'node': comment.node._id,
'user': comment.user._id,
'comment': comment._id,
},
auth=auth,
save=False,
)
comment.node.save()
return comment
def edit(self, content, auth, save=False):
self.content = content
self.modified = True
self.node.add_log(
NodeLog.COMMENT_UPDATED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def delete(self, auth, save=False):
self.is_deleted = True
self.node.add_log(
NodeLog.COMMENT_REMOVED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def undelete(self, auth, save=False):
self.is_deleted = False
self.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def report_abuse(self, user, save=False, **kwargs):
"""Report that a comment is abuse.
:param User user: User submitting the report
:param bool save: Save changes
:param dict kwargs: Report details
:raises: ValueError if the user submitting abuse is the same as the
user who posted the comment
"""
if user == self.user:
raise ValueError
self.reports[user._id] = kwargs
if save:
self.save()
def unreport_abuse(self, user, save=False):
"""Revoke report of abuse.
:param User user: User who submitted the report
:param bool save: Save changes
:raises: ValueError if user has not reported comment as abuse
"""
try:
self.reports.pop(user._id)
except KeyError:
raise ValueError('User has not reported comment as abuse')
if save:
self.save()
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
action = fields.StringField(index=True)
params = fields.DictionaryField()
should_hide = fields.BooleanField(default=False)
was_connected_to = fields.ForeignField('node', list=True)
user = fields.ForeignField('user', backref='created')
foreign_user = fields.StringField()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants -- NOTE: templates stored in log_templates.mako
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = 'pointer_created'
POINTER_FORKED = 'pointer_forked'
POINTER_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
MADE_WIKI_PUBLIC = 'made_wiki_public'
MADE_WIKI_PRIVATE = 'made_wiki_private'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
UPDATED_FIELDS = 'updated_fields'
FILE_MOVED = 'addon_file_moved'
FILE_COPIED = 'addon_file_copied'
FOLDER_CREATED = 'folder_created'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
EXTERNAL_IDS_ADDED = 'external_ids_added'
EMBARGO_APPROVED = 'embargo_approved'
EMBARGO_CANCELLED = 'embargo_cancelled'
EMBARGO_COMPLETED = 'embargo_completed'
EMBARGO_INITIATED = 'embargo_initiated'
RETRACTION_APPROVED = 'retraction_approved'
RETRACTION_CANCELLED = 'retraction_cancelled'
RETRACTION_INITIATED = 'retraction_initiated'
REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
def __repr__(self):
return ('<NodeLog({self.action!r}, params={self.params!r}) '
'with id {self._id!r}>').format(self=self)
@property
def node(self):
"""Return the :class:`Node` associated with this log."""
return (
Node.load(self.params.get('node')) or
Node.load(self.params.get('project'))
)
@property
def tz_date(self):
'''Return the timezone-aware date.
'''
# Date should always be defined, but a few logs in production are
# missing dates; return None and log error if date missing
if self.date:
return self.date.replace(tzinfo=pytz.UTC)
logger.error('Date missing on NodeLog {}'.format(self._primary_key))
@property
def formatted_date(self):
'''Return the timezone-aware, ISO-formatted string representation of
this log's date.
'''
if self.tz_date:
return self.tz_date.isoformat()
def resolve_node(self, node):
"""A single `NodeLog` record may be attached to multiple `Node` records
(parents, forks, registrations, etc.), so the node that the log refers
to may not be the same as the node the user is viewing. Use
`resolve_node` to determine the relevant node to use for permission
checks.
:param Node node: Node being viewed
"""
if self.node == node or self.node in node.nodes:
return self.node
if node.is_fork_of(self.node) or node.is_registration_of(self.node):
return node
for child in node.nodes:
            if child.is_fork_of(self.node) or child.is_registration_of(self.node):
return child
return False
def can_view(self, node, auth):
node_to_check = self.resolve_node(node)
if node_to_check:
return node_to_check.can_view(auth)
return False
def _render_log_contributor(self, contributor, anonymous=False):
user = User.load(contributor)
if not user:
return None
if self.node:
fullname = user.display_full_name(node=self.node)
else:
fullname = user.fullname
return {
'id': privacy_info_handle(user._primary_key, anonymous),
'fullname': privacy_info_handle(fullname, anonymous, name=True),
'registered': user.is_registered,
}
class Tag(StoredObject):
_id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
def __repr__(self):
return '<Tag() with id {self._id!r}>'.format(self=self)
@property
def url(self):
return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
"""A link to a Node. The Pointer delegates all but a few methods to its
contained Node. Forking and registration are overridden such that the
link is cloned, but its contained Node is not.
"""
#: Whether this is a pointer or not
primary = False
_id = fields.StringField()
node = fields.ForeignField('node', backref='_pointed')
_meta = {'optimistic': True}
def _clone(self):
if self.node:
clone = self.clone()
clone.node = self.node
clone.save()
return clone
def fork_node(self, *args, **kwargs):
return self._clone()
def register_node(self, *args, **kwargs):
return self._clone()
def use_as_template(self, *args, **kwargs):
return self._clone()
def resolve(self):
return self.node
def __getattr__(self, item):
"""Delegate attribute access to the node being pointed to."""
# Prevent backref lookups from being overriden by proxied node
try:
return super(Pointer, self).__getattr__(item)
except AttributeError:
pass
if self.node:
return getattr(self.node, item)
raise AttributeError(
'Pointer object has no attribute {0}'.format(
item
)
)
def get_pointer_parent(pointer):
"""Given a `Pointer` object, return its parent node.
"""
# The `parent_node` property of the `Pointer` schema refers to the parents
# of the pointed-at `Node`, not the parents of the `Pointer`; use the
# back-reference syntax to find the parents of the `Pointer`.
parent_refs = pointer.node__parent
assert len(parent_refs) == 1, 'Pointer must have exactly one parent.'
return parent_refs[0]
def validate_category(value):
"""Validator for Node#category. Makes sure that the value is one of the
categories defined in CATEGORY_MAP.
"""
if value not in Node.CATEGORY_MAP.keys():
raise ValidationValueError('Invalid value for category.')
return True
def validate_title(value):
"""Validator for Node#title. Makes sure that the value exists and is not
above 200 characters.
"""
if value is None or not value.strip():
raise ValidationValueError('Title cannot be blank.')
if len(value) > 200:
raise ValidationValueError('Title cannot exceed 200 characters.')
return True
def validate_user(value):
if value != {}:
user_id = value.iterkeys().next()
if User.find(Q('_id', 'eq', user_id)).count() != 1:
raise ValidationValueError('User does not exist.')
return True
class NodeUpdateError(Exception):
def __init__(self, reason, key, *args, **kwargs):
super(NodeUpdateError, self).__init__(*args, **kwargs)
self.key = key
self.reason = reason
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
#: Whether this is a pointer or not
primary = True
# Node fields that trigger an update to Solr on save
SOLR_UPDATE_FIELDS = {
'title',
'category',
'description',
'visible_contributor_ids',
'tags',
'is_fork',
'is_registration',
'retraction',
'embargo',
'is_public',
'is_deleted',
'wiki_pages_current',
'is_retracted',
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
CATEGORY_MAP = OrderedDict([
('', 'Uncategorized'),
('project', 'Project'),
('hypothesis', 'Hypothesis'),
('methods and measures', 'Methods and Measures'),
('procedure', 'Procedure'),
('instrumentation', 'Instrumentation'),
('data', 'Data'),
('analysis', 'Analysis'),
('communication', 'Communication'),
('other', 'Other'),
])
WRITABLE_WHITELIST = [
'title',
'description',
'category',
]
_id = fields.StringField(primary=True)
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
# Privacy
is_public = fields.BooleanField(default=False, index=True)
# User mappings
permissions = fields.DictionaryField()
visible_contributor_ids = fields.StringField(list=True)
# Project Organization
is_dashboard = fields.BooleanField(default=False, index=True)
is_folder = fields.BooleanField(default=False, index=True)
# Expanded: Dictionary field mapping user IDs to expand state of this node:
# {
# 'icpnw': True,
# 'cdi38': False,
# }
expanded = fields.DictionaryField(default={}, validate=validate_user)
is_deleted = fields.BooleanField(default=False, index=True)
deleted_date = fields.DateTimeField(index=True)
is_registration = fields.BooleanField(default=False, index=True)
registered_date = fields.DateTimeField(index=True)
registered_user = fields.ForeignField('user', backref='registered')
registered_schema = fields.ForeignField('metaschema', backref='registered')
registered_meta = fields.DictionaryField()
registration_approval = fields.ForeignField('registrationapproval')
retraction = fields.ForeignField('retraction')
embargo = fields.ForeignField('embargo')
is_fork = fields.BooleanField(default=False, index=True)
forked_date = fields.DateTimeField(index=True)
title = fields.StringField(validate=validate_title)
description = fields.StringField()
category = fields.StringField(validate=validate_category, index=True)
# One of 'public', 'private'
# TODO: Add validator
comment_level = fields.StringField(default='private')
wiki_pages_current = fields.DictionaryField()
wiki_pages_versions = fields.DictionaryField()
# Dictionary field mapping node wiki page to sharejs private uuid.
# {<page_name>: <sharejs_id>}
wiki_private_uuids = fields.DictionaryField()
file_guid_to_share_uuids = fields.DictionaryField()
creator = fields.ForeignField('user', backref='created')
contributors = fields.ForeignField('user', list=True, backref='contributed')
users_watching_node = fields.ForeignField('user', list=True, backref='watched')
logs = fields.ForeignField('nodelog', list=True, backref='logged')
tags = fields.ForeignField('tag', list=True, backref='tagged')
# Tags for internal use
system_tags = fields.StringField(list=True)
nodes = fields.AbstractForeignField(list=True, backref='parent')
forked_from = fields.ForeignField('node', backref='forked', index=True)
registered_from = fields.ForeignField('node', backref='registrations', index=True)
# The node (if any) used as a template for this node's creation
template_node = fields.ForeignField('node', backref='template_node', index=True)
piwik_site_id = fields.StringField()
# Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
# {<User.id>: [<Node._id>, <Node2._id>, ...] }
child_node_subscriptions = fields.DictionaryField(default=dict)
_meta = {
'optimistic': True,
}
def __init__(self, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
if kwargs.get('_is_loaded', False):
return
if self.creator:
self.contributors.append(self.creator)
self.set_visible(self.creator, visible=True, log=False)
# Add default creator permissions
for permission in CREATOR_PERMISSIONS:
self.add_permission(self.creator, permission, save=False)
def __repr__(self):
return ('<Node(title={self.title!r}, category={self.category!r}) '
'with _id {self._id!r}>').format(self=self)
# For Django compatibility
@property
def pk(self):
return self._id
@property
def category_display(self):
"""The human-readable representation of this node's category."""
return self.CATEGORY_MAP[self.category]
@property
def sanction(self):
sanction = self.registration_approval or self.embargo or self.retraction
if sanction:
return sanction
elif self.parent_node:
return self.parent_node.sanction
else:
return None
@property
def is_pending_registration(self):
if not self.is_registration:
return False
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_pending_registration
return False
return self.registration_approval.pending_approval
@property
def is_registration_approved(self):
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_registration_approved
return False
return self.registration_approval.is_approved
@property
def is_retracted(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_retracted
return False
return self.retraction.is_approved
@property
def is_pending_retraction(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_pending_retraction
return False
return self.retraction.pending_approval
@property
def embargo_end_date(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.embargo_end_date
return False
return self.embargo.embargo_end_date
@property
def is_pending_embargo(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo
return False
return self.embargo.pending_approval
@property
def is_pending_embargo_for_existing_registration(self):
""" Returns True if Node has an Embargo pending approval for an
existing registrations. This is used specifically to ensure
registrations pre-dating the Embargo feature do not get deleted if
their respective Embargo request is rejected.
"""
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo_for_existing_registration
return False
return self.embargo.pending_registration
@property
def private_links(self):
return self.privatelink__shared
@property
def private_links_active(self):
return [x for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_active(self):
return [x.key for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_deleted(self):
return [x.key for x in self.private_links if x.is_deleted]
def path_above(self, auth):
parents = self.parents
return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])
@property
def ids_above(self):
parents = self.parents
return {p._id for p in parents}
@property
def nodes_active(self):
return [x for x in self.nodes if not x.is_deleted]
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
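        Illustrative usage (a sketch; ``node`` and ``auth`` are assumed to be
        an existing Node and an Auth object for the current request)::

            if node.can_edit(auth=auth):
                node.set_title('Updated title', auth=auth, save=True)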
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
def active_contributors(self, include=lambda n: True):
for contrib in self.contributors:
if contrib.is_active and include(contrib):
yield contrib
def is_admin_parent(self, user):
if self.has_permission(user, 'admin', check_parent=False):
return True
if self.parent_node:
return self.parent_node.is_admin_parent(user)
return False
def can_view(self, auth):
if not auth and not self.is_public:
return False
return (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read')) or
auth.private_key in self.private_link_keys_active or
self.is_admin_parent(auth.user)
)
def is_expanded(self, user=None):
"""Return if a user is has expanded the folder in the dashboard view.
Must specify one of (`auth`, `user`).
:param User user: User object to check
:returns: Boolean if the folder is expanded.
"""
if user._id in self.expanded:
return self.expanded[user._id]
else:
return False
def expand(self, user=None):
self.expanded[user._id] = True
self.save()
def collapse(self, user=None):
self.expanded[user._id] = False
self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
def is_fork_of(self, other):
return self.is_derived_from(other, 'forked_from')
def is_registration_of(self, other):
return self.is_derived_from(other, 'registered_from')
@property
def forks(self):
"""List of forks of this node"""
return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
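        Illustrative usage (a sketch; ``node`` and ``user`` are assumed to be
        existing objects)::

            node.add_permission(user, 'write', save=True)
            assert node.has_permission(user, 'write')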
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
def remove_permission(self, user, permission, save=False):
"""Revoke permission from a user.
:param User user: User to revoke permission from
:param str permission: Permission to revoke
:param bool save: Save changes
:raises: ValueError if user does not have permission
"""
try:
self.permissions[user._id].remove(permission)
except (KeyError, ValueError):
raise ValueError('User does not have permission {0}'.format(permission))
if save:
self.save()
def clear_permission(self, user, save=False):
"""Clear all permissions for a user.
:param User user: User to revoke permission from
:param bool save: Save changes
:raises: ValueError if user not in permissions
"""
try:
self.permissions.pop(user._id)
except KeyError:
raise ValueError(
'User {0} not in permissions list for node {1}'.format(
user._id, self._id,
)
)
if save:
self.save()
def set_permissions(self, user, permissions, save=False):
self.permissions[user._id] = permissions
if save:
self.save()
def has_permission(self, user, permission, check_parent=True):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
if permission in self.permissions.get(user._id, []):
return True
if permission == 'read' and check_parent:
return self.is_admin_parent(user)
return False
def has_permission_on_children(self, user, permission):
"""Checks if the given user has a given permission on any child nodes
that are not registrations or deleted
"""
if self.has_permission(user, permission):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.has_permission_on_children(user, permission):
return True
return False
def has_addon_on_children(self, addon):
"""Checks if a given node has a specific addon on child nodes
that are not registrations or deleted
"""
if self.has_addon(addon):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.has_addon_on_children(addon):
return True
return False
def get_permissions(self, user):
"""Get list of permissions for user.
:param User user: User to check
:returns: List of permissions
:raises: ValueError if user not found in permissions
"""
return self.permissions.get(user._id, [])
def adjust_permissions(self):
for key in self.permissions.keys():
if key not in self.contributors:
self.permissions.pop(key)
@property
def visible_contributors(self):
return [
User.load(_id)
for _id in self.visible_contributor_ids
]
@property
def parents(self):
if self.parent_node:
return [self.parent_node] + self.parent_node.parents
return []
@property
    def admin_contributor_ids(self):
contributor_ids = self.contributors._to_primary_keys()
admin_ids = set()
for parent in self.parents:
admins = [
user for user, perms in parent.permissions.iteritems()
if 'admin' in perms
]
admin_ids.update(set(admins).difference(contributor_ids))
return admin_ids
@property
def admin_contributors(self):
return sorted(
[User.load(_id) for _id in self.admin_contributor_ids],
key=lambda user: user.family_name,
)
def get_visible(self, user):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
return user._id in self.visible_contributor_ids
def update_visible_ids(self, save=False):
"""Update the order of `visible_contributor_ids`. Updating on making
a contributor visible is more efficient than recomputing order on
accessing `visible_contributors`.
"""
self.visible_contributor_ids = [
contributor._id
for contributor in self.contributors
if contributor._id in self.visible_contributor_ids
]
if save:
self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
if visible and user._id not in self.visible_contributor_ids:
self.visible_contributor_ids.append(user._id)
self.update_visible_ids(save=False)
elif not visible and user._id in self.visible_contributor_ids:
if len(self.visible_contributor_ids) == 1:
raise ValueError('Must have at least one visible contributor')
self.visible_contributor_ids.remove(user._id)
else:
return
message = (
NodeLog.MADE_CONTRIBUTOR_VISIBLE
if visible
else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
)
if log:
self.add_log(
message,
params={
'parent': self.parent_id,
'node': self._id,
'contributors': [user._id],
},
auth=auth,
save=False,
)
if save:
self.save()
def can_comment(self, auth):
if self.comment_level == 'public':
return auth.logged_in and (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read'))
)
return self.is_contributor(auth.user)
def update(self, fields, auth=None, save=True):
if self.is_registration:
raise NodeUpdateError(reason="Registered content cannot be updated")
values = {}
for key, value in fields.iteritems():
if key not in self.WRITABLE_WHITELIST:
continue
with warnings.catch_warnings():
try:
                    # This is in place because historically projects and components
                    # lived on different Elasticsearch indexes, and at the time of Node.save
                    # there is no reliable way to check what the old Node.category
                    # value was. When the category changes it is possible to have duplicate/dead
                    # search entries, so always delete the ES doc on category change
# TODO: consolidate Node indexes into a single index, refactor search
if key == 'category':
self.delete_search_entry()
###############
values[key] = {
'old': getattr(self, key),
'new': value,
}
setattr(self, key, value)
except AttributeError:
raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
except warnings.Warning:
raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
if save:
updated = self.save()
else:
updated = []
for key in values:
values[key]['new'] = getattr(self, key)
self.add_log(NodeLog.UPDATED_FIELDS,
params={
'node': self._id,
'updated_fields': {
key: {
'old': values[key]['old'],
'new': values[key]['new']
}
for key in values
}
},
auth=auth)
return updated
def save(self, *args, **kwargs):
update_piwik = kwargs.pop('update_piwik', True)
self.adjust_permissions()
first_save = not self._is_loaded
if first_save and self.is_dashboard:
existing_dashboards = self.creator.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if existing_dashboards.count() > 0:
raise NodeStateError("Only one dashboard allowed per user.")
is_original = not self.is_registration and not self.is_fork
if 'suppress_log' in kwargs.keys():
suppress_log = kwargs['suppress_log']
del kwargs['suppress_log']
else:
suppress_log = False
saved_fields = super(Node, self).save(*args, **kwargs)
if first_save and is_original and not suppress_log:
# TODO: This logic also exists in self.use_as_template()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
self.add_addon(addon.short_name, auth=None, log=False)
# Define log fields for non-component project
log_action = NodeLog.PROJECT_CREATED
log_params = {
'node': self._primary_key,
}
if getattr(self, 'parent', None):
# Append log to parent
self.parent.nodes.append(self)
self.parent.save()
log_params.update({'parent_node': self.parent._primary_key})
# Add log with appropriate fields
self.add_log(
log_action,
params=log_params,
auth=Auth(user=self.creator),
log_date=self.date_created,
save=True,
)
# Only update Solr if at least one stored field has changed, and if
# public or privacy setting has changed
need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
if not self.is_public:
if first_save or 'is_public' not in saved_fields:
need_update = False
if self.is_folder or self.archiving:
need_update = False
if need_update:
self.update_search()
# This method checks what has changed.
if settings.PIWIK_HOST and update_piwik:
piwik_tasks.update_node(self._id, saved_fields)
# Return expected value for StoredObject::save
return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
"""Create a new project, using an existing project as a template.
:param auth: The user to be assigned as creator
:param changes: A dictionary of changes, keyed by node id, which
override the attributes of the template project or its
children.
:return: The `Node` instance created.
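        Illustrative usage (a sketch; ``template`` and ``auth`` are assumed to
        be an existing Node and an Auth object for the new creator)::

            new_node = template.use_as_template(
                auth=auth,
                changes={template._id: {'title': 'My copy'}},
            )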
"""
changes = changes or dict()
# build the dict of attributes to change for the new node
try:
attributes = changes[self._id]
# TODO: explicitly define attributes which may be changed.
except (AttributeError, KeyError):
attributes = dict()
new = self.clone()
# clear permissions, which are not cleared by the clone method
new.permissions = {}
new.visible_contributor_ids = []
# Clear quasi-foreign fields
new.wiki_pages_current = {}
new.wiki_pages_versions = {}
new.wiki_private_uuids = {}
new.file_guid_to_share_uuids = {}
# set attributes which may be overridden by `changes`
new.is_public = False
new.description = None
# apply `changes`
for attr, val in attributes.iteritems():
setattr(new, attr, val)
# set attributes which may NOT be overridden by `changes`
new.creator = auth.user
new.template_node = self
new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
new.is_fork = False
new.is_registration = False
new.piwik_site_id = None
        # If the title hasn't been changed, apply the default prefix (once)
if (new.title == self.title
and top_level
and language.TEMPLATED_FROM_PREFIX not in new.title):
new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
# Slight hack - date_created is a read-only field.
new._fields['date_created'].__set__(
new,
datetime.datetime.utcnow(),
safe=True
)
new.save(suppress_log=True)
# Log the creation
new.add_log(
NodeLog.CREATED_FROM,
params={
'node': new._primary_key,
'template_node': {
'id': self._primary_key,
'url': self.url,
},
},
auth=auth,
log_date=new.date_created,
save=False,
)
# add mandatory addons
# TODO: This logic also exists in self.save()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
new.add_addon(addon.short_name, auth=None, log=False)
# deal with the children of the node, if any
new.nodes = [
x.use_as_template(auth, changes, top_level=False)
for x in self.nodes
if x.can_view(auth)
]
new.save()
return new
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
"""Add a pointer to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
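        Illustrative usage (a sketch; ``project``, ``other_node`` and ``auth``
        are assumed to be existing objects)::

            pointer = project.add_pointer(other_node, auth=auth)
            assert other_node._id in project.node_ids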
"""
        # Fail if node already in nodes / pointers. Note: cast nodes and pointers
        # to primary keys to test for conflicts with both nodes and pointers
        # contained in `self.nodes`.
if node._id in self.node_ids:
raise ValueError(
'Pointer to node {0} already in list'.format(node._id)
)
# If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
# Also, no pointers to the dashboard project, which could cause loops as well.
already_pointed = node.pointed
if node.is_folder and len(already_pointed) > 0:
raise ValueError(
'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
)
if node.is_dashboard:
raise ValueError(
'Pointer to dashboard ({0}) not allowed.'.format(node._id)
)
# Append pointer
pointer = Pointer(node=node)
pointer.save()
self.nodes.append(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_CREATED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
return pointer
def rm_pointer(self, pointer, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
if pointer not in self.nodes:
raise ValueError
# Remove `Pointer` object; will also remove self from `nodes` list of
# parent node
Pointer.remove_one(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
def node_and_primary_descendants(self):
"""Return an iterator for a node and all of its primary (non-pointer) descendants.
:param node Node: target Node
"""
return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary))
@property
def depth(self):
return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
"""
        Recursively find the first set of descendants under a given node that
        meet a given condition.
        Returns a list of [(node, [children]), ...]
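        Illustrative usage (a sketch; ``project`` and ``auth`` are assumed to
        be an existing Node and an Auth object)::

            contributed = project.next_descendants(
                auth,
                condition=lambda auth, node: node.is_contributor(auth.user),
            )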
"""
ret = []
for node in self.nodes:
if condition(auth, node):
# base case
ret.append((node, []))
else:
ret.append((node, node.next_descendants(auth, condition)))
ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches
return ret
def get_descendants_recursive(self, include=lambda n: True):
for node in self.nodes:
if include(node):
yield node
if node.primary:
for descendant in node.get_descendants_recursive(include):
if include(descendant):
yield descendant
def get_aggregate_logs_queryset(self, auth):
ids = [self._id] + [n._id
for n in self.get_descendants_recursive()
if n.can_view(auth)]
query = Q('__backrefs.logged.node.logs', 'in', ids)
return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
def resolve(self):
return self
def fork_pointer(self, pointer, auth, save=True):
"""Replace a pointer with a fork. If the pointer points to a project,
fork the project and replace the pointer with a new pointer pointing
to the fork. If the pointer points to a component, fork the component
and add it to the current node.
:param Pointer pointer:
:param Auth auth:
:param bool save:
:return: Forked node
"""
# Fail if pointer not contained in `nodes`
try:
index = self.nodes.index(pointer)
except ValueError:
raise ValueError('Pointer {0} not in list'.format(pointer._id))
# Get pointed node
node = pointer.node
# Fork into current node and replace pointer with forked component
forked = node.fork_node(auth)
if forked is None:
raise ValueError('Could not fork node')
self.nodes[index] = forked
# Add log
self.add_log(
NodeLog.POINTER_FORKED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
# Garbage-collect pointer. Note: Must save current node before
# removing pointer, else remove will fail when trying to remove
# backref from self to pointer.
Pointer.remove_one(pointer)
# Return forked content
return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
        return list(reversed(self.logs))[:n]
@property
def date_modified(self):
'''The most recent datetime when this node was modified, based on
the logs.
'''
try:
return self.logs[-1].date
except IndexError:
return self.date_created
def set_title(self, title, auth, save=False):
"""Set the title of this Node and log it.
:param str title: The new title.
:param auth: All the auth information including user, API key.
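        Illustrative usage (a sketch; ``node`` and ``auth`` are assumed to be
        existing objects)::

            node.set_title('Final analysis plan', auth=auth, save=True)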
"""
#Called so validation does not have to wait until save.
validate_title(title)
original_title = self.title
self.title = title
self.add_log(
action=NodeLog.EDITED_TITLE,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'title_new': self.title,
'title_original': original_title,
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def set_description(self, description, auth, save=False):
"""Set the description and log the event.
:param str description: The new description
        :param auth: All the auth information including user, API key.
:param bool save: Save self after updating.
"""
original = self.description
self.description = description
self.add_log(
action=NodeLog.EDITED_DESCRIPTION,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'description_new': self.description,
'description_original': original
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def update_search(self):
from website import search
try:
search.search.update_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_search_entry(self):
from website import search
try:
search.search.delete_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_registration_tree(self, save=False):
self.is_deleted = True
if not getattr(self.embargo, 'for_existing_registration', False):
self.registered_from = None
if save:
self.save()
self.update_search()
for child in self.nodes_primary:
child.delete_registration_tree(save=save)
def remove_node(self, auth, date=None):
"""Marks a node as deleted.
TODO: Call a hook on addons
Adds a log to the parent node if applicable
:param auth: an instance of :class:`Auth`.
:param date: Date node was removed
:type date: `datetime.datetime` or `None`
"""
# TODO: rename "date" param - it's shadowing a global
if self.is_dashboard:
raise NodeStateError("Dashboards may not be deleted.")
if not self.can_edit(auth):
raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
#if this is a folder, remove all the folders that this is pointing at.
if self.is_folder:
for pointed in self.nodes_pointer:
if pointed.node.is_folder:
pointed.node.remove_node(auth=auth)
if [x for x in self.nodes_primary if not x.is_deleted]:
raise NodeStateError("Any child components must be deleted prior to deleting this project.")
# After delete callback
for addon in self.get_addons():
message = addon.after_delete(self, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
log_date = date or datetime.datetime.utcnow()
# Add log to parent
if self.node__parent:
self.node__parent[0].add_log(
NodeLog.NODE_REMOVED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
else:
self.add_log(
NodeLog.PROJECT_DELETED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
self.is_deleted = True
self.deleted_date = date
self.save()
auth_signals.node_deleted.send(self)
return True
def fork_node(self, auth, title='Fork of '):
"""Recursively fork a node.
:param Auth auth: Consolidated authorization
:param str title: Optional text to prepend to forked title
:return: Forked node
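        Illustrative usage (a sketch; ``node`` and ``auth`` are assumed to be
        an existing Node and an Auth object for a user who can read it)::

            fork = node.fork_node(auth=auth)
            assert fork.is_fork and fork.forked_from._id == node._id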
"""
user = auth.user
# Non-contributors can't fork private nodes
if not (self.is_public or self.has_permission(user, 'read')):
raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
if original.is_deleted:
raise NodeStateError('Cannot fork deleted node.')
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
forked = original.clone()
forked.logs = self.logs
forked.tags = self.tags
# Recursively fork child nodes
for node_contained in original.nodes:
if not node_contained.is_deleted:
forked_node = None
try: # Catch the potential PermissionsError above
forked_node = node_contained.fork_node(auth=auth, title='')
except PermissionsError:
pass # If this exception is thrown omit the node from the result set
if forked_node is not None:
forked.nodes.append(forked_node)
forked.title = title + forked.title
forked.is_fork = True
forked.is_registration = False
forked.forked_date = when
forked.forked_from = original
forked.creator = user
forked.piwik_site_id = None
# Forks default to private status
forked.is_public = False
# Clear permissions before adding users
forked.permissions = {}
forked.visible_contributor_ids = []
forked.add_contributor(
contributor=user,
permissions=CREATOR_PERMISSIONS,
log=False,
save=False
)
forked.add_log(
action=NodeLog.NODE_FORKED,
params={
'parent_node': original.parent_id,
'node': original._primary_key,
'registration': forked._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
forked.save()
# After fork callback
for addon in original.get_addons():
_, message = addon.after_fork(original, forked, user)
if message:
status.push_status_message(message, kind='info', trust=True)
return forked
def register_node(self, schema, auth, template, data, parent=None):
"""Make a frozen copy of a node.
:param schema: Schema object
:param auth: All the auth information including user, API key.
:param template: Template name
:param data: Form data
:param parent Node: parent registration of registration to be created
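        Illustrative usage (a sketch; ``meta_schema``, ``template_name`` and
        ``form_data`` are assumed to come from the registration workflow)::

            registration = node.register_node(
                schema=meta_schema, auth=auth,
                template=template_name, data=form_data,
            )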
"""
        # NOTE: Admins can register child nodes even if they don't have write access to them
if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
raise PermissionsError(
'User {} does not have permission '
'to register this node'.format(auth.user._id)
)
if self.is_folder:
raise NodeStateError("Folders may not be registered")
template = urllib.unquote_plus(template)
template = to_mongo(template)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
if original.is_deleted:
raise NodeStateError('Cannot register deleted node.')
registered = original.clone()
registered.is_registration = True
registered.registered_date = when
registered.registered_user = auth.user
registered.registered_schema = schema
registered.registered_from = original
if not registered.registered_meta:
registered.registered_meta = {}
registered.registered_meta[template] = data
registered.contributors = self.contributors
registered.forked_from = self.forked_from
registered.creator = self.creator
registered.logs = self.logs
registered.tags = self.tags
registered.piwik_site_id = None
registered.save()
if parent:
registered.parent_node = parent
# After register callback
for addon in original.get_addons():
_, message = addon.after_register(original, registered, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
for node_contained in original.nodes:
if not node_contained.is_deleted:
child_registration = node_contained.register_node(
schema, auth, template, data, parent=registered
)
if child_registration and not child_registration.primary:
registered.nodes.append(child_registration)
registered.save()
if settings.ENABLE_ARCHIVER:
project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_tag(self, tag, auth, save=True):
if tag not in self.tags:
new_tag = Tag.load(tag)
if not new_tag:
new_tag = Tag(_id=tag)
new_tag.save()
self.tags.append(new_tag)
self.add_log(
action=NodeLog.TAG_ADDED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
user = auth.user if auth else None
params['node'] = params.get('node') or params.get('project')
log = NodeLog(
action=action,
user=user,
foreign_user=foreign_user,
params=params,
)
if log_date:
log.date = log_date
log.save()
self.logs.append(log)
if save:
self.save()
if user:
increment_user_activity_counters(user._primary_key, action, log.date)
return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def api_v2_url(self):
return reverse('nodes:node-detail', kwargs={'node_id': self._id})
@property
def absolute_api_v2_url(self):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
@property
def csl(self): # formats node information into CSL format for citation parsing
"""a dict in CSL-JSON schema
For details on this schema, see:
https://github.com/citation-style-language/schema#csl-json-schema
"""
csl = {
'id': self._id,
'title': sanitize.unescape_entities(self.title),
'author': [
contributor.csl_name # method in auth/model.py which parses the names of authors
for contributor in self.visible_contributors
],
'publisher': 'Open Science Framework',
'type': 'webpage',
'URL': self.display_absolute_url,
}
doi = self.get_identifier_value('doi')
if doi:
csl['DOI'] = doi
if self.logs:
csl['issued'] = datetime_to_csl(self.logs[-1].date)
return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
@parent_node.setter
def parent_node(self, parent):
parent.nodes.append(self)
parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
@property
def registrations(self):
return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
"""Add an add-on to the node. Do nothing if the addon is already
enabled.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool log: Add a log after adding the add-on
:return: A boolean, whether the addon was added
"""
ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
*args, **kwargs)
if ret and log:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save() # TODO: here, or outside the conditional? @mambocab
return ret
def delete_addon(self, addon_name, auth, _force=False):
"""Delete an add-on from the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to delete
mandatory add-ons!
:return bool: Add-on was deleted
"""
ret = super(Node, self).delete_addon(addon_name, auth, _force)
if ret:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save()
# TODO: save here or outside the conditional? @mambocab
return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
for i, contrib in enumerate(self.contributors):
if contrib._primary_key == old._primary_key:
self.contributors[i] = new
# Remove unclaimed record for the project
if self._primary_key in old.unclaimed_records:
del old.unclaimed_records[self._primary_key]
old.save()
for permission in self.get_permissions(old):
self.add_permission(new, permission)
self.permissions.pop(old._id)
if old._id in self.visible_contributor_ids:
self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
return True
return False
def remove_contributor(self, contributor, auth, log=True):
"""Remove a contributor from this node.
:param contributor: User object, the contributor to be removed
:param auth: All the auth information including user, API key.
"""
# remove unclaimed record if necessary
if self._primary_key in contributor.unclaimed_records:
del contributor.unclaimed_records[self._primary_key]
self.contributors.remove(contributor._id)
self.clear_permission(contributor)
if contributor._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(contributor._id)
if not self.visible_contributor_ids:
return False
# Node must have at least one registered admin user
# TODO: Move to validator or helper
admins = [
user for user in self.contributors
if self.has_permission(user, 'admin')
and user.is_registered
]
if not admins:
return False
# Clear permissions for removed user
self.permissions.pop(contributor._id, None)
# After remove callback
for addon in self.get_addons():
message = addon.after_remove_contributor(self, contributor, auth)
if message:
status.push_status_message(message, kind='info', trust=True)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributor': contributor._id,
},
auth=auth,
save=False,
)
self.save()
#send signal to remove this user from project subscriptions
auth_signals.contributor_removed.send(contributor, node=self)
return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
if False in results:
return False
return True
def manage_contributors(self, user_dicts, auth, save=False):
"""Reorder and remove contributors.
:param list user_dicts: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
:param Auth auth: Consolidated authentication information
:param bool save: Save changes
        :raises: ValueError if any user in `user_dicts` is not a contributor or if
            no admin contributors would remain
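        Illustrative usage (a sketch; ``node``, ``user`` and ``auth`` are
        assumed to be existing objects)::

            node.manage_contributors(
                [{'id': user._id, 'permission': 'admin', 'visible': True}],
                auth=auth, save=True,
            )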
"""
with TokuTransaction():
users = []
user_ids = []
permissions_changed = {}
visibility_removed = []
to_retain = []
to_remove = []
for user_dict in user_dicts:
user = User.load(user_dict['id'])
if user is None:
raise ValueError('User not found')
if user not in self.contributors:
raise ValueError(
'User {0} not in contributors'.format(user.fullname)
)
permissions = expand_permissions(user_dict['permission'])
if set(permissions) != set(self.get_permissions(user)):
self.set_permissions(user, permissions, save=False)
permissions_changed[user._id] = permissions
# visible must be added before removed to ensure they are validated properly
if user_dict['visible']:
self.set_visible(user,
visible=True,
auth=auth)
else:
visibility_removed.append(user)
users.append(user)
user_ids.append(user_dict['id'])
for user in visibility_removed:
self.set_visible(user,
visible=False,
auth=auth)
for user in self.contributors:
if user._id in user_ids:
to_retain.append(user)
else:
to_remove.append(user)
# TODO: Move to validator or helper @jmcarp
admins = [
user for user in users
if self.has_permission(user, 'admin')
and user.is_registered
]
if users is None or not admins:
raise ValueError(
'Must have at least one registered admin contributor'
)
if to_retain != users:
self.add_log(
action=NodeLog.CONTRIB_REORDERED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [
user._id
for user in users
],
},
auth=auth,
save=False,
)
if to_remove:
self.remove_contributors(to_remove, auth=auth, save=False)
self.contributors = users
if permissions_changed:
self.add_log(
action=NodeLog.PERMISSIONS_UPDATED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': permissions_changed,
},
auth=auth,
save=False,
)
# Update list of visible IDs
self.update_visible_ids()
if save:
self.save()
with TokuTransaction():
if to_remove or permissions_changed and ['read'] in permissions_changed.values():
project_signals.write_permissions_revoked.send(self)
def add_contributor(self, contributor, permissions=None, visible=True,
auth=None, log=True, save=False):
"""Add a contributor to the project.
:param User contributor: The contributor to be added
:param list permissions: Permissions to grant to the contributor
:param bool visible: Contributor is visible in project dashboard
:param Auth auth: All the auth information including user, API key
:param bool log: Add log to self
:param bool save: Save after adding contributor
:returns: Whether contributor was added
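        Illustrative usage (a sketch; ``node``, ``new_user`` and ``auth`` are
        assumed to be existing objects)::

            node.add_contributor(new_user, permissions=['read', 'write'],
                                 auth=auth, save=True)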
"""
MAX_RECENT_LENGTH = 15
# If user is merged into another account, use master account
contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
if contrib_to_add not in self.contributors:
self.contributors.append(contrib_to_add)
if visible:
self.set_visible(contrib_to_add, visible=True, log=False)
# Add default contributor permissions
permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
for permission in permissions:
self.add_permission(contrib_to_add, permission, save=False)
# Add contributor to recently added list for user
if auth is not None:
user = auth.user
if contrib_to_add in user.recently_added:
user.recently_added.remove(contrib_to_add)
user.recently_added.insert(0, contrib_to_add)
while len(user.recently_added) > MAX_RECENT_LENGTH:
user.recently_added.pop()
if log:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [contrib_to_add._primary_key],
},
auth=auth,
save=False,
)
if save:
self.save()
project_signals.contributor_added.send(self, contributor=contributor)
return True
        # Permissions must be overridden if they change when a contributor who is
        # already on a child node is added to the parent.
elif contrib_to_add in self.contributors and permissions is not None:
self.set_permissions(contrib_to_add, permissions)
if save:
self.save()
return False
else:
return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
"""Add multiple contributors
:param list contributors: A list of dictionaries of the form:
{
'user': <User object>,
'permissions': <Permissions list, e.g. ['read', 'write']>,
'visible': <Boolean indicating whether or not user is a bibliographic contributor>
}
:param auth: All the auth information including user, API key.
:param log: Add log to self
:param save: Save after adding contributor
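        Illustrative usage (a sketch; ``node``, ``user_a`` and ``auth`` are
        assumed to be existing objects)::

            node.add_contributors([
                {'user': user_a, 'permissions': ['read', 'write'], 'visible': True},
            ], auth=auth, save=True)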
"""
for contrib in contributors:
self.add_contributor(
contributor=contrib['user'], permissions=contrib['permissions'],
visible=contrib['visible'], auth=auth, log=False, save=False,
)
if log and contributors:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [
contrib['user']._id
for contrib in contributors
],
},
auth=auth,
save=False,
)
if save:
self.save()
def add_unregistered_contributor(self, fullname, email, auth,
permissions=None, save=False):
"""Add a non-registered contributor to the project.
:param str fullname: The full name of the person.
:param str email: The email address of the person.
:param Auth auth: Auth object for the user adding the contributor.
:returns: The added contributor
:raises: DuplicateEmailError if user with given email is already in the database.
"""
# Create a new user record
contributor = User.create_unregistered(fullname=fullname, email=email)
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
try:
contributor.save()
except ValidationValueError: # User with same email already exists
contributor = get_user(email=email)
# Unregistered users may have multiple unclaimed records, so
# only raise error if user is registered.
if contributor.is_registered or self.is_contributor(contributor):
raise
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
contributor.save()
self.add_contributor(
contributor, permissions=permissions, auth=auth,
log=True, save=False,
)
self.save()
return contributor
def set_privacy(self, permissions, auth=None, log=True, save=True):
"""Set the permissions for this node.
:param permissions: A string, either 'public' or 'private'
:param auth: All the auth information including user, API key.
:param bool log: Whether to add a NodeLog for the privacy change.
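        Illustrative usage (a sketch; ``node`` and ``auth`` are assumed to be
        existing objects)::

            node.set_privacy('public', auth=auth)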
"""
if permissions == 'public' and not self.is_public:
if self.is_registration:
if self.is_pending_embargo:
raise NodeStateError("A registration with an unapproved embargo cannot be made public.")
if self.embargo_end_date and not self.is_pending_embargo:
self.embargo.state = Embargo.REJECTED
self.embargo.save()
self.is_public = True
elif permissions == 'private' and self.is_public:
if self.is_registration and not self.is_pending_embargo:
raise NodeStateError("Public registrations must be retracted, not made private.")
else:
self.is_public = False
else:
return False
# After set permissions callback
for addon in self.get_addons():
message = addon.after_set_privacy(self, permissions)
if message:
status.push_status_message(message, kind='info', trust=False)
if log:
action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
self.add_log(
action=action,
params={
'project': self.parent_id,
'node': self._primary_key,
},
auth=auth,
save=False,
)
if save:
self.save()
return True
def admin_public_wiki(self, user):
return (
self.has_addon('wiki') and
self.has_permission(user, 'admin') and
self.is_public
)
def include_wiki_settings(self, user):
"""Check if node meets requirements to make publicly editable."""
return (
self.admin_public_wiki(user) or
any(
each.admin_public_wiki(user)
for each in self.get_descendants_recursive()
)
)
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
from website.addons.wiki.model import NodeWikiPage
if name:
name = (name or '').strip()
key = to_mongo_key(name)
try:
if version and (isinstance(version, int) or version.isdigit()):
id = self.wiki_pages_versions[key][int(version) - 1]
elif version == 'previous':
id = self.wiki_pages_versions[key][-2]
elif version == 'current' or version is None:
id = self.wiki_pages_current[key]
else:
return None
except (KeyError, IndexError):
return None
return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
"""Update the node's wiki page with new content.
        :param name: A string, the page's name, e.g. ``"home"``.
:param content: A string, the posted content.
:param auth: All the auth information including user, API key.
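        Illustrative usage (a sketch; ``node`` and ``auth`` are assumed to be
        existing objects)::

            node.update_node_wiki('home', 'Welcome to my project', auth)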
"""
from website.addons.wiki.model import NodeWikiPage
name = (name or '').strip()
key = to_mongo_key(name)
if key not in self.wiki_pages_current:
if key in self.wiki_pages_versions:
version = len(self.wiki_pages_versions[key]) + 1
else:
version = 1
else:
current = NodeWikiPage.load(self.wiki_pages_current[key])
current.is_current = False
version = current.version + 1
current.save()
new_page = NodeWikiPage(
page_name=name,
version=version,
user=auth.user,
is_current=True,
node=self,
content=content
)
new_page.save()
# check if the wiki page already exists in versions (existed once and is now deleted)
if key not in self.wiki_pages_versions:
self.wiki_pages_versions[key] = []
self.wiki_pages_versions[key].append(new_page._primary_key)
self.wiki_pages_current[key] = new_page._primary_key
self.add_log(
action=NodeLog.WIKI_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': new_page.page_name,
'page_id': new_page._primary_key,
'version': new_page.version,
},
auth=auth,
log_date=new_page.date,
save=False,
)
self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
"""Rename the node's wiki page with new name.
:param name: A string, the page's name, e.g. ``"My Page"``.
:param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
:param auth: All the auth information including user, API key.
"""
# TODO: Fix circular imports
from website.addons.wiki.exceptions import (
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
)
name = (name or '').strip()
key = to_mongo_key(name)
new_name = (new_name or '').strip()
new_key = to_mongo_key(new_name)
page = self.get_wiki_page(name)
if key == 'home':
raise PageCannotRenameError('Cannot rename wiki home page')
if not page:
raise PageNotFoundError('Wiki page not found')
if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
raise PageConflictError(
'Page already exists with name {0}'.format(
new_name,
)
)
# rename the page first in case we hit a validation exception.
old_name = page.page_name
page.rename(new_name)
# TODO: merge historical records like update (prevents log breaks)
# transfer the old page versions/current keys to the new name.
if key != new_key:
self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
del self.wiki_pages_versions[key]
self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
del self.wiki_pages_current[key]
if key in self.wiki_private_uuids:
self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
del self.wiki_private_uuids[key]
self.add_log(
action=NodeLog.WIKI_RENAMED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
'old_page': old_name,
'version': page.version,
},
auth=auth,
save=False,
)
self.save()
def delete_node_wiki(self, name, auth):
name = (name or '').strip()
key = to_mongo_key(name)
page = self.get_wiki_page(key)
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_DELETED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
},
auth=auth,
save=False,
)
self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self, auth=None):
"""Dictionary representation of node that is nested within a NodeLog's
representation.
"""
# TODO: incomplete implementation
return {
'id': str(self._primary_key),
'category': self.category_display,
'node_type': self.project_or_component,
'url': self.url,
# TODO: Titles shouldn't contain escaped HTML in the first place
'title': sanitize.unescape_entities(self.title),
'path': self.path_above(auth),
'api_url': self.api_url,
'is_public': self.is_public,
'is_registration': self.is_registration,
}
def _initiate_retraction(self, user, justification=None):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param justification: Justification, if given, for retraction
"""
retraction = Retraction(
initiated_by=user,
justification=justification or None, # make empty strings None
state=Retraction.UNAPPROVED
)
retraction.save() # Save retraction so it has a primary key
self.retraction = retraction
self.save() # Set foreign field reference Node.retraction
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
retraction.add_authorizer(admin)
retraction.save() # Save retraction approval state
return retraction
def retract_registration(self, user, justification=None, save=True):
"""Retract public registration. Instantiate new Retraction object
and associate it with the respective registration.
"""
if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)):
raise NodeStateError('Only public or embargoed registrations may be retracted.')
if self.root is not self:
raise NodeStateError('Retraction of non-parent registrations is not permitted.')
retraction = self._initiate_retraction(user, justification)
self.registered_from.add_log(
action=NodeLog.RETRACTION_INITIATED,
params={
'node': self._id,
'retraction_id': retraction._id,
},
auth=Auth(user),
)
self.retraction = retraction
if save:
self.save()
def _is_embargo_date_valid(self, end_date):
today = datetime.datetime.utcnow()
if (end_date - today) >= settings.EMBARGO_END_DATE_MIN:
if (end_date - today) <= settings.EMBARGO_END_DATE_MAX:
return True
return False
def _initiate_embargo(self, user, end_date, for_existing_registration=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param end_date: Date when the registration should be made public
"""
embargo = Embargo(
initiated_by=user,
end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
for_existing_registration=for_existing_registration
)
embargo.save() # Save embargo so it has a primary key
self.embargo = embargo
self.save() # Set foreign field reference Node.embargo
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
embargo.add_authorizer(admin)
embargo.save() # Save embargo's approval_state
return embargo
def embargo_registration(self, user, end_date, for_existing_registration=False):
"""Enter registration into an embargo period at end of which, it will
be made public
:param user: User initiating the embargo
:param end_date: Date when the registration should be made public
:raises: NodeStateError if Node is not a registration
:raises: PermissionsError if user is not an admin for the Node
:raises: ValidationValueError if end_date is not within time constraints
"""
if not self.is_registration:
raise NodeStateError('Only registrations may be embargoed')
if not self.has_permission(user, 'admin'):
raise PermissionsError('Only admins may embargo a registration')
if not self._is_embargo_date_valid(end_date):
raise ValidationValueError('Embargo end date must be more than one day in the future')
embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration)
self.registered_from.add_log(
action=NodeLog.EMBARGO_INITIATED,
params={
'node': self._id,
'embargo_id': embargo._id,
},
auth=Auth(user),
save=True,
)
if self.is_public:
self.set_privacy('private', Auth(user))
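    # Illustrative flow (hypothetical date): end_date must lie between
    # settings.EMBARGO_END_DATE_MIN and settings.EMBARGO_END_DATE_MAX from now
    # (see _is_embargo_date_valid above):
    #   registration.embargo_registration(
    #       user, datetime.datetime.utcnow() + datetime.timedelta(days=30))
    #   registration.embargo.approve(admin, token)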
def _initiate_approval(self, user):
end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME
approval = RegistrationApproval(
initiated_by=user,
end_date=end_date,
)
approval.save() # Save approval so it has a primary key
self.registration_approval = approval
self.save() # Set foreign field reference Node.registration_approval
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
approval.add_authorizer(admin)
approval.save() # Save approval's approval_state
return approval
def require_approval(self, user):
if not self.is_registration:
            raise NodeStateError('Only registrations may require approval')
if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins may require approval for a registration')
approval = self._initiate_approval(user)
self.registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
params={
'node': self._id,
'registration_approval_id': approval._id,
},
auth=Auth(user),
save=True,
)
# TODO make private?
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
"""Ensure that user IDs in `contributors` and `permissions` match.
"""
node = instance
contributor_ids = set([user._id for user in node.contributors])
permission_ids = set(node.permissions.keys())
mismatched_contributors = contributor_ids.difference(permission_ids)
if mismatched_contributors:
raise ValidationValueError(
'Contributors {0} missing from `permissions` on node {1}'.format(
', '.join(mismatched_contributors),
node._id,
)
)
mismatched_permissions = permission_ids.difference(contributor_ids)
if mismatched_permissions:
raise ValidationValueError(
'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
node._id,
)
)
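# Illustrative shape (hypothetical IDs and permission names): for a node whose
# contributors have _ids {'abc12', 'def34'}, the permissions dict must be keyed
# by exactly those IDs, e.g.
#   node.permissions == {'abc12': ['read', 'write', 'admin'], 'def34': ['read']}
# An ID present in one structure but missing from the other raises
# ValidationValueError.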
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
"""Ensure that user IDs in `contributors` and `visible_contributor_ids`
match.
"""
node = instance
for user_id in node.visible_contributor_ids:
if user_id not in node.contributors:
raise ValidationValueError(
('User {0} is in `visible_contributor_ids` but not in '
'`contributors` on node {1}').format(
user_id,
node._id,
)
)
class WatchConfig(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node = fields.ForeignField('Node', backref='watched')
digest = fields.BooleanField(default=False)
immediate = fields.BooleanField(default=False)
def __repr__(self):
return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
key = fields.StringField(required=True)
name = fields.StringField()
is_deleted = fields.BooleanField(default=False)
anonymous = fields.BooleanField(default=False)
nodes = fields.ForeignField('node', list=True, backref='shared')
creator = fields.ForeignField('user', backref='created')
@property
def node_ids(self):
node_ids = [node._id for node in self.nodes]
return node_ids
def node_scale(self, node):
# node may be None if previous node's parent is deleted
if node is None or node.parent_id not in self.node_ids:
return -40
else:
offset = 20 if node.parent_node is not None else 0
return offset + self.node_scale(node.parent_node)
def to_json(self):
return {
"id": self._id,
"date_created": iso8601format(self.date_created),
"key": self.key,
"name": self.name,
"creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
"nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
for x in self.nodes if not x.is_deleted],
"anonymous": self.anonymous
}
class Sanction(StoredObject):
"""Sanction object is a generic way to track approval states"""
abstract = True
UNAPPROVED = 'unapproved'
APPROVED = 'approved'
REJECTED = 'rejected'
DISPLAY_NAME = 'Sanction'
# SHORT_NAME must correspond with the associated foreign field to query against,
# e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
SHORT_NAME = 'sanction'
APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    REJECTION_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
end_date = fields.DateTimeField(default=None)
# Sanction subclasses must have an initiated_by field
# initiated_by = fields.ForeignField('user', backref='initiated')
    # Expanded: Dictionary field mapping admin IDs to their approval status and relevant tokens:
# {
# 'b3k97': {
# 'has_approved': False,
# 'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
# 'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
# }
approval_state = fields.DictionaryField()
# One of 'unapproved', 'approved', or 'rejected'
state = fields.StringField(default='unapproved')
def __repr__(self):
return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self)
@property
def pending_approval(self):
return self.state == Sanction.UNAPPROVED
@property
def is_approved(self):
return self.state == Sanction.APPROVED
@property
def is_rejected(self):
return self.state == Sanction.REJECTED
def _validate_authorizer(self, user):
return True
def add_authorizer(self, user, approved=False, save=False):
valid = self._validate_authorizer(user)
if valid and user._id not in self.approval_state:
self.approval_state[user._id] = {
'has_approved': approved,
'approval_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'approve_{}'.format(self.SHORT_NAME)
}
),
'rejection_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'reject_{}'.format(self.SHORT_NAME)
}
),
}
if save:
self.save()
return True
return False
def remove_authorizer(self, user):
if user._id not in self.approval_state:
return False
del self.approval_state[user._id]
self.save()
return True
def _on_approve(self, user, token):
if all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
self.state = Sanction.APPROVED
self._on_complete(user)
def _on_reject(self, user, token):
"""Early termination of a Sanction"""
raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
def _on_complete(self, user):
"""When a Sanction has unanimous approval"""
raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
def approve(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
try:
if self.approval_state[user._id]['approval_token'] != token:
raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.approval_state[user._id]['has_approved'] = True
self._on_approve(user, token)
def reject(self, user, token):
"""Cancels sanction if user is admin and token verifies."""
try:
if self.approval_state[user._id]['rejection_token'] != token:
raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.state = Sanction.REJECTED
self._on_reject(user, token)
def forcibly_reject(self):
self.state = Sanction.REJECTED
def _notify_authorizer(self, user):
pass
def _notify_non_authorizer(self, user):
pass
def ask(self, group):
for contrib in group:
if contrib._id in self.approval_state:
self._notify_authorizer(contrib)
else:
self._notify_non_authorizer(contrib)
class EmailApprovableSanction(Sanction):
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
VIEW_URL_TEMPLATE = ''
APPROVE_URL_TEMPLATE = ''
REJECT_URL_TEMPLATE = ''
    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever the model's approval_state is
    # modified and the model is saved
# {
# 'abcde': {
# 'approve': [APPROVAL_URL],
# 'reject': [REJECT_URL],
# }
# }
stashed_urls = fields.DictionaryField(default=dict)
@staticmethod
def _format_or_empty(template, context):
if context:
return template.format(**context)
return ''
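    # Illustrative behaviour (hypothetical values): with the template
    # 'project/{node_id}/?token={token}' and context
    # {'node_id': 'abc12', 'token': 's3cret'} this returns
    # 'project/abc12/?token=s3cret'; with an empty or None context it returns ''.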
def _view_url(self, user_id):
return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))
def _view_url_context(self, user_id):
return None
def _approval_url(self, user_id):
return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
def _approval_url_context(self, user_id):
return None
def _rejection_url(self, user_id):
return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
def _rejection_url_context(self, user_id):
return None
def _send_approval_request_email(self, user, template, context):
mails.send_mail(
user.username,
template,
user=user,
**context
)
def _email_template_context(self, user, is_authorizer=False):
return {}
def _notify_authorizer(self, authorizer):
context = self._email_template_context(authorizer, is_authorizer=True)
if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def _notify_non_authorizer(self, user):
context = self._email_template_context(user)
if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def add_authorizer(self, user, **kwargs):
super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
self.stashed_urls[user._id] = {
'view': self._view_url(user._id),
'approve': self._approval_url(user._id),
'reject': self._rejection_url(user._id)
}
self.save()
class Embargo(EmailApprovableSanction):
"""Embargo object for registrations waiting to go public."""
COMPLETED = 'completed'
DISPLAY_NAME = 'Embargo'
SHORT_NAME = 'embargo'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='embargoed')
for_existing_registration = fields.BooleanField(default=False)
@property
def is_completed(self):
return self.state == self.COMPLETED
@property
def embargo_end_date(self):
if self.state == self.APPROVED:
return self.end_date
return False
    # NOTE(hrybacki): Old, private registrations are grandfathered and are not
    # required to be made public or embargoed. This property differentiates them
    # from new registrations entering an embargo period, which should not
    # show up in any search-related fields.
@property
def pending_registration(self):
return not self.for_existing_registration and self.pending_approval
def __repr__(self):
parent_registration = None
try:
parent_registration = Node.find_one(Q('embargo', 'eq', self))
except NoResultsFound:
pass
return ('<Embargo(parent_registration={0}, initiated_by={1}, '
'end_date={2}) with _id {3}>').format(
parent_registration,
self.initiated_by,
self.end_date,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'approval_link': approval_link,
'project_name': registration.title,
'disapproval_link': disapproval_link,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
}
def _validate_authorizer(self, user):
registration = Node.find_one(Q('embargo', 'eq', self))
return registration.has_permission(user, ADMIN)
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(user),
)
        # If the embargo was for a new registration, remove the backref to the
        # parent project and delete the registration created when the embargo
        # was initiated
        if not self.for_existing_registration:
            parent_registration.registered_from = None
            parent_registration.is_deleted = True
parent_registration.save()
def disapprove_embargo(self, user, token):
"""Cancels retraction if user is admin and token verifies."""
self.reject(user, token)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_APPROVED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(self.initiated_by),
)
        self.state = self.COMPLETED
self.save()
def approve_embargo(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
self.approve(user, token)
class Retraction(EmailApprovableSanction):
"""Retraction object for public registrations."""
DISPLAY_NAME = 'Retraction'
SHORT_NAME = 'retraction'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='initiated')
justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
def __repr__(self):
parent_registration = None
try:
parent_registration = Node.find_one(Q('retraction', 'eq', self))
except NoResultsFound:
pass
return ('<Retraction(parent_registration={0}, initiated_by={1}) '
'with _id {2}>').format(
parent_registration,
self.initiated_by,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'project_name': registration.title,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_CANCELLED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(user),
save=True,
)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_APPROVED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(self.initiated_by),
)
# Remove any embargoes associated with the registration
if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
parent_registration.embargo.state = self.REJECTED
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': parent_registration.embargo._id,
},
auth=Auth(self.initiated_by),
)
parent_registration.embargo.save()
# Ensure retracted registration is public
if not parent_registration.is_public:
parent_registration.set_privacy('public')
parent_registration.update_search()
# Retraction status is inherited from the root project, so we
# need to recursively update search for every descendant node
        # so that retracted subprojects/components don't appear in search
for node in parent_registration.get_descendants_recursive():
node.update_search()
        self.state = self.APPROVED
self.save()
def approve_retraction(self, user, token):
self.approve(user, token)
def disapprove_retraction(self, user, token):
self.reject(user, token)
class RegistrationApproval(EmailApprovableSanction):
DISPLAY_NAME = 'Approval'
SHORT_NAME = 'registration_approval'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='registration_approved')
def _view_url_context(self, user_id):
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
'project_name': registration.title,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _add_success_logs(self, node, user):
src = node.registered_from
src.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'parent_node': src.parent_id,
'node': src._primary_key,
'registration': node._primary_key,
},
auth=Auth(user),
save=False
)
src.save()
def _on_complete(self, user):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
auth = Auth(self.initiated_by)
register.set_privacy('public', auth, log=False)
for child in register.get_descendants_recursive(lambda n: n.primary):
child.set_privacy('public', auth, log=False)
# Accounts for system actions where no `User` performs the final approval
auth = Auth(user) if user else None
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
params={
'node': registered_from._id,
'registration_approval_id': self._id,
},
auth=auth,
)
for node in register.root.node_and_primary_descendants():
self._add_success_logs(node, user)
node.update_search() # update search if public
self.state = self.APPROVED
self.save()
def _on_reject(self, user, token):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
register.delete_registration_tree(save=True)
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
params={
'node': register._id,
'registration_approval_id': self._id,
},
auth=Auth(user),
)
|
lyndsysimon/osf.io
|
website/project/model.py
|
Python
|
apache-2.0
| 125,565
|
[
"VisIt"
] |
f0cdb8af770b57e8d8aeecaa5878b397b22e6439052e64c0efdc88dc21679d68
|
from lost_visions import models
__author__ = 'ubuntu'
def add_irish_life_linked_images():
to_link = {
"11126612333":
[
{
"name": "The Meeting Of Ned And Kathleen",
"file": "PAGE 182 The Meeting Of Ned And Kathleen.jpg"
}
],
"11123139574":
[
{
"name": "Farmer O'Shaughnessy" ,
"file": "PAGE 14 Farmer O'Shaughnessy.jpg"
},
{
"name": "Farmer O'Shaughnessy Reverse Note",
"file": "PAGE 14 Farmer O'Shaughnessy Reverse Note.jpg"
}
],
"11122550103":
[
{
"name": "Mrs Moriarty's Appeal To Fr. Burke" ,
"file": "PAGE 22 Mrs Moriarty's Appeal To Fr. Burke.jpg"
},
{
"name": "Mrs Moriarty's Appeal To Fr. Burke Reverse Note",
"file": "PAGE 22 Mrs Moriarty's Appeal To Fr. Burke Reverse Note.jpg"
}
],
"11123797044":
[
{
"name": "The Mitchellstown Caves" ,
"file": "PAGE 30 The Mitchellstown Caves.jpg"
},
{
"name": "The Mitchellstown Caves Reverse Note",
"file": "PAGE 30 The Mitchellstown Caves Reverse Note.jpg"
}
],
"11126097286":
[
{
"name": "The Deliverence" ,
"file": "PAGE 54 The Deliverence.jpg"
},
{
"name": "The Deliverence Reverse Note",
"file": "PAGE 54 The Deliverence Reverse Note.jpg"
}
],
"11127443786":
[
{
"name": "Phelim McCarthy" ,
"file": "PAGE 94 Phelim McCarthy.jpg"
},
{
"name": "Phelim McCarthy Reverse Note & Drawing",
"file": "PAGE 94 Phelim McCarthy Reverse Note & Drawing.jpg"
}
],
"11121871366":
[
{
"name": "The Present To McCarthy" ,
"file": "PAGE 98 The Present To McCarthy.jpg"
},
{
"name": "The Present To McCarthy Reverse Note",
"file": "PAGE 98 The Present To McCarthy Reverse Note.jpg"
}
],
"11122038386":
[
{
"name": "Phelim's Visit To The Hovel",
"file": "PAGE 106 Phelim's Visit To The Hovel.jpg"
},
{
"name": "Phelim's Visit To The Hovel Reverse Note",
"file": "PAGE 106 Phelim's Visit To The Hovel Reverse Note.jpg"
}
],
"11125452786":
[
{
"name": "Ned Cassidy Of The Lakes",
"file": "PAGE 110 Ned Cassidy Of The Lakes.jpg"
},
{
"name": "Ned Cassidy Of The Lakes Reverse Note",
"file": "PAGE 110 Ned Cassidy Of The Lakes Reverse Note.jpg"
}
],
"11127076054":
[
{
"name": "The Lady's Present To Ned",
"file": "PAGE 118 The Lady's Present To Ned.jpg"
},
{
"name": "The Lady's Present To Ned Reverse Note",
"file": "PAGE 118 The Lady's Present To Ned Reverse Note.jpg"
}
],
"11125131876":
[
{
"name": "Ned Cassidy On The Lakes",
"file": "PAGE 126 Ned Cassidy On The Lakes.jpg"
},
{
"name": "Ned Cassidy On The Lakes Reverse Note",
"file": "PAGE 126 Ned Cassidy On The Lakes Reverse Note.jpg"
}
],
"11127214014":
[
{
"name": "Ned Saving His Enemy Dan Foley",
"file": "PAGE 134 Ned Saving His Enemy Dan Foley.jpg"
},
{
"name": "Ned Saving His Enemy Dan Foley Reverse Note",
"file": "PAGE 134 Ned Saving His Enemy Dan Foley Reverse Note.jpg"
}
],
"11122097133":
[
{
"name": "Ned Cassidy Of THe Lakes In 'The Steel Bracelets'",
"file": "PAGE 158 Ned Cassidy Of THe Lakes In 'The Steel Bracelets'.jpg"
},
{
"name": "Ned Cassidy Of THe Lakes In 'The Steel Bracelets' Reverse Note",
"file": "PAGE 158 Ned Cassidy Of THe Lakes In 'The Steel Bracelets' Reverse Note.jpg"
}
],
"11123252635":
[
{
"name": "Corporal Duffy About To Search The House",
"file": "PAGE 166 Corporal Duffy About To Search The House.jpg"
},
{
"name": "Corporal Duffy About To Search The House Reverse Note",
"file": "PAGE 166 Corporal Duffy About To Search The House Reverse Note.jpg"
}
],
"11120961556":
[
{
"name": "The Governor Of The Gaol Visits Ned",
"file": "PAGE 174 The Governor Of The Gaol Visits Ned.jpg"
},
{
"name": "The Governor Of The Gaol Visits Ned Reverse Note",
"file": "PAGE 174 The Governor Of The Gaol Visits Ned Reverse Note.jpg"
}
]
}
for link in to_link:
images = to_link[link]
for img in images:
linked_image = models.LinkedImage()
linked_image.name = img['name']
linked_image.file_name = img['file']
linked_image.location = '/home/spx5ich/linked_images'
linked_image.image = models.Image.objects.get(flickr_id=link)
linked_image.description = "This sketch is the property of Tom Gilboy. " \
"Please contact The Illustration Archive for permission to reproduce"
linked_image.save()
add_irish_life_linked_images()
|
CSCSI/Lost-Visions
|
lost_visions/utils/data_import.py
|
Python
|
apache-2.0
| 6,758
|
[
"VisIt"
] |
06d4628aa4617a576aed063629788f654abcc5dd49f2f6e024e24904d4756a4b
|
import dendropy
import numpy as np
from io_util import *
def delimit_newick(infile_name):
from Bio import Phylo
from cStringIO import StringIO
tmp_tree = Phylo.read(infile_name, 'newick')
for t in tmp_tree.get_terminals():
t.name = "'"+t.name+"'"
tree_string = StringIO()
Phylo.write(tmp_tree, tree_string, format="newick")
delimited_tree = tree_string.getvalue().replace("\\'","")
tree_string.close()
return delimited_tree
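# Illustrative behaviour (hypothetical input): for a newick file containing
# "(A/1995:0.1,B/1996:0.2):0.0;", each terminal name is wrapped in single
# quotes ('A/1995', 'B/1996') so that downstream parsers treat characters such
# as '/' as part of the taxon label rather than as newick syntax.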
def color_BioTree_by_attribute(T,attribute, vmin=None, vmax = None, missing_val='min', transform = lambda x:x, cmap=None):
'''
simple function that assigns a color to each node in a biopython tree
the color can be determined by any attribute of the nodes. missing attributes will be
determined from the children, all children are assumed to have the attribute
in addition, the attribute can be transformed for example by taking the log
parameters:
T -- BioPython tree
attribute -- name of the attribute that is to be used to color the tree.
vmin -- lower offset that is subtracted
vmax -- values are scaled as (val-vmin)/(vmax-vmin)
    missing_val -- if the attribute does not exist in a particular node,
the min, max, or mean of the children is used
transform -- function mapping float to float, e.g. log
cmap -- colormap to be used
'''
import numpy as np
# make a list of tranformed data
vals = [transform(t.__getattribute__(attribute)) for t in
T.get_terminals()+T.get_nonterminals() if attribute in t.__dict__]
if vmin is None: # if vmin or vmax is not provided, use min or max of data
vmin = min(vals)
print "Set vmin to",vmin
if vmax is None:
vmax = max(vals)
print "Set vmax to",vmax
if cmap is None:
from matplotlib.cm import jet
cmap=jet
# assign function used to determine missing values from children
if missing_val=='min':
missing_val_func = min
elif missing_val=='mean':
        missing_val_func = np.mean
elif missing_val=='max':
missing_val_func = max
else:
missing_val_func = min
# loop over all nodes, catch missing values and assign
for node in T.get_nonterminals(order='postorder'):
if attribute not in node.__dict__:
node.__setattr__(attribute, missing_val_func([c.__getattribute__(attribute) for c in node.clades]))
print "node", node,"has no",attribute,"Setting to min:", node.__getattribute__(attribute)
# map value to color for each node
for node in T.get_terminals()+T.get_nonterminals():
node.color = map(int, np.array(cmap((transform(node.__getattribute__(attribute))-vmin)/(vmax-vmin))[:-1])*255)
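# Illustrative usage (assumes a Biopython tree `T` whose nodes carry numeric
# attributes such as `num_date` or `xvalue`):
#   color_BioTree_by_attribute(T, 'num_date')
#   color_BioTree_by_attribute(T, 'xvalue', transform=np.log, missing_val='mean')
# Afterwards every node has a `color` attribute of [r, g, b] integers in 0-255.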
def to_Biopython(tree):
from Bio import Phylo
from StringIO import StringIO
from itertools import izip
bT = Phylo.read(StringIO(tree.as_newick_string()), 'newick')
for new_leaf, old_leaf in izip(bT.get_terminals(), tree.leaf_nodes()):
for attr,val in old_leaf.__dict__.iteritems():
try:
new_leaf.__setattr__(attr, float(val))
except:
new_leaf.__setattr__(attr, val)
for new_leaf, old_leaf in izip(bT.get_nonterminals(order='postorder'), tree.postorder_internal_node_iter()):
for attr,val in old_leaf.__dict__.iteritems():
try:
new_leaf.__setattr__(attr, float(val))
except:
new_leaf.__setattr__(attr, val)
return bT
def tip_descendants(node):
"""Take node, ie. dict, and return a flattened list of all tips descending from this node"""
if 'children' in node:
for child in node['children']:
for desc in tip_descendants(child):
yield desc
else:
yield node
def all_descendants(node):
"""Take node, ie. dict, and return a flattened list of all nodes descending from this node"""
yield node
if 'children' in node:
for child in node['children']:
for desc in all_descendants(child):
yield desc
def get_dates(node):
"""Return ordered list of dates of descendants of a node"""
return sorted([n['date'] for n in node.leaf_iter()])
def dendropy_to_json(node, extra_attr = []):
json = {}
str_attr = ['country','region','clade','strain', 'date']
num_attr = ['xvalue', 'yvalue', 'num_date']
for prop in str_attr:
if hasattr(node, prop):
json[prop] = node.__getattribute__(prop)
for prop in num_attr:
if hasattr(node, prop):
try:
json[prop] = round(node.__getattribute__(prop),5)
except:
print "cannot round:", node.__getattribute__(prop), "assigned as is"
json[prop] = node.__getattribute__(prop)
for prop in extra_attr:
if len(prop)==2 and callable(prop[1]):
if hasattr(node, prop[0]):
json[prop] = prop[1](node.__getattribute__(prop[0]))
else:
if hasattr(node, prop):
json[prop] = node.__getattribute__(prop)
if hasattr(node, 'freq') and node.freq is not None:
json['freq'] = {reg: list(freq) if freq is not None else "undefined" for reg, freq in node.freq.iteritems()}
if hasattr(node, 'pivots'):
json['pivots'] = list(node.pivots)
if node.child_nodes():
json["children"] = []
for ch in node.child_nodes():
json["children"].append(dendropy_to_json(ch, extra_attr))
return json
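# Illustrative output (hypothetical values) for a terminal node:
#   {'strain': 'A/Perth/16/2009', 'date': '2009-04-07', 'region': 'oceania',
#    'xvalue': 0.0123, 'yvalue': 42.0}
# Internal nodes additionally carry a 'children' list with one such dict per child.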
def json_to_dendropy(json):
'''
read a json dictionary and make a dendropy tree from it.
'''
tree = dendropy.Tree()
tree.get_from_string(';', 'newick')
root = tree.seed_node
json_to_dendropy_sub(json, root, tree.taxon_set)
root.edge_length=0.0
return tree
def json_to_dendropy_sub(json, node, taxon_set):
'''
recursively calls itself for all children of node and
builds up the tree. entries in json are added as node attributes
'''
if 'xvalue' in json:
node.xvalue = float(json['xvalue'])
for attr,val in json.iteritems():
if attr=='children':
for sub_json in val:
child_node = dendropy.Node()
json_to_dendropy_sub(sub_json, child_node, taxon_set)
if hasattr(child_node, 'xvalue'):
node.add_child(child_node, edge_length = child_node.xvalue - node.xvalue)
elif hasattr(child_node, 'branch_length'):
node.add_child(child_node, edge_length = child_node.branch_length)
else:
node.add_child(child_node, edge_length = 1.0)
else:
try:
node.__setattr__(attr, float(val))
except:
if val=='undefined':
node.__setattr__(attr, None)
else:
node.__setattr__(attr, val)
if len(node.child_nodes())==0:
node.taxon = dendropy.Taxon(label=json['strain'].lower())
node.strain = json['strain']
taxon_set.add_taxon(node.taxon)
def BioPhylo_to_json(node):
json = {}
if hasattr(node, 'clade'):
json['clade'] = node.clade
if node.name:
json['strain'] = str(node.name).replace("'", '')
if hasattr(node, 'branch_length'):
json['branch_length'] = round(node.branch_length, 5)
if hasattr(node, 'xvalue'):
json['xvalue'] = round(node.xvalue, 5)
if hasattr(node, 'yvalue'):
json['yvalue'] = round(node.yvalue, 5)
if hasattr(node, 'date'):
json['date'] = node.date
if hasattr(node, 'seq'):
json['seq'] = str(node.seq)
if len(node.clades):
json["children"] = []
for ch in node.clades:
json["children"].append(BioPhylo_to_json(ch))
return json
|
doerlbh/Indie-nextflu
|
augur/src/tree_util.py
|
Python
|
agpl-3.0
| 6,886
|
[
"Biopython"
] |
b89b9b3551af570bb8aff51948358a34019db49604c5dfd85f337fcee1f9adda
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a beam pipeline to run the WENO5 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import app
from absl import flags
import apache_beam as beam
import numpy as np
from pde_superresolution import equations
from pde_superresolution import integrate
from pde_superresolution import xarray_beam
# NOTE(shoyer): allow_override=True lets us import multiple binaries for the
# purpose of running integration tests. This is safe since we're strict about
# only using FLAGS inside main().
# files
flags.DEFINE_string(
'output_path', '',
'Full path to which to save the resulting netCDF file.',
allow_override=True)
# equation parameters
flags.DEFINE_enum(
'equation_name', 'burgers', list(equations.EQUATION_TYPES),
'Equation to integrate.',
allow_override=True)
flags.DEFINE_string(
'equation_kwargs', '{"num_points": 400}',
'Parameters to pass to the equation constructor.',
allow_override=True)
flags.DEFINE_integer(
'num_samples', 10,
'Number of times to integrate each equation.',
allow_override=True)
# integrate parameters
flags.DEFINE_float(
'time_max', 10,
'Total time for which to run each integration.',
allow_override=True)
flags.DEFINE_float(
'time_delta', 1,
'Difference between saved time steps in the integration.',
allow_override=True)
flags.DEFINE_float(
'warmup', 0,
'Amount of time to integrate before using the neural network.',
allow_override=True)
flags.DEFINE_enum(
'discretization_method', 'exact', ['exact', 'weno', 'spectral'],
'How the exact solution is discretized. By default, uses the "exact" '
'method that has been saved for this equation.',
allow_override=True)
flags.DEFINE_string(
'integrate_method', 'RK23',
'Method to use for integration with scipy.integrate.solve_ivp.',
allow_override=True)
flags.DEFINE_float(
'exact_filter_interval', 0,
'Interval between periodic filtering. Only used for spectral methods.',
allow_override=True)
FLAGS = flags.FLAGS
def main(_, runner=None):
if runner is None:
# must create before flags are used
runner = beam.runners.DirectRunner()
equation_kwargs = json.loads(FLAGS.equation_kwargs)
use_weno = (FLAGS.discretization_method == 'weno'
or (FLAGS.discretization_method == 'exact'
and FLAGS.equation_name == 'burgers'))
if (not use_weno and FLAGS.exact_filter_interval):
exact_filter_interval = float(FLAGS.exact_filter_interval)
else:
exact_filter_interval = None
def create_equation(seed, name=FLAGS.equation_name,
kwargs=equation_kwargs):
equation_type = (equations.FLUX_EQUATION_TYPES
if use_weno else
equations.EQUATION_TYPES)[name]
return equation_type(random_seed=seed, **kwargs)
def do_integrate(
equation,
times=np.arange(0, FLAGS.time_max + FLAGS.time_delta, FLAGS.time_delta),
warmup=FLAGS.warmup,
integrate_method=FLAGS.integrate_method):
integrate_func = (integrate.integrate_weno
if use_weno
else integrate.integrate_spectral)
return integrate_func(equation, times, warmup, integrate_method,
exact_filter_interval=exact_filter_interval)
def create_equation_and_integrate(seed):
equation = create_equation(seed)
result = do_integrate(equation)
result.coords['sample'] = seed
return result
pipeline = (
beam.Create(list(range(FLAGS.num_samples)))
| beam.Map(create_equation_and_integrate)
| beam.CombineGlobally(xarray_beam.ConcatCombineFn('sample'))
| beam.Map(lambda ds: ds.sortby('sample'))
| beam.Map(xarray_beam.write_netcdf, path=FLAGS.output_path))
runner.run(pipeline)
if __name__ == '__main__':
app.run(main)
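# Illustrative invocation (flag values and output path are hypothetical); the
# flags defined above select the equation, sample count and integration setup:
#   python create_exact_data.py \
#     --equation_name=burgers \
#     --equation_kwargs='{"num_points": 400}' \
#     --num_samples=10 --time_max=10 --time_delta=1 \
#     --output_path=/tmp/burgers_exact.nc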
|
google/data-driven-discretization-1d
|
pde_superresolution/scripts/create_exact_data.py
|
Python
|
apache-2.0
| 4,584
|
[
"NetCDF"
] |
2b50f4c302a6688f81f674664d2b11c92d4565ece864ba15b8e1530ed59dd504
|
import vtk
import matplotlib.cm as cm
import sys
LABEL_NAMES = [\
'olfactory bulb',
'cerebral cortex',
'lateral septal nuclei',
'striatum',
'globus pallidus',
'thalamus',
'hypothalamus',
'hippocampal formation',
'superior colliculus',
'inferior colliculus',
'cerebellum',
'fimbria',
'internal capsule',
'ventricle',
'ventricle',
'corpus callosum',
'subcommissural organ',
'anterior commissure',
'paraflocculus',
'deep mesencephalic nucleus',
'fornix',
'aqueaduct',
'pineal gland',
'substantia nigra',
'brainstem (remainder)',
'pontine gray',
'fasciculus retroflexus',
'amygdala',
'interpeduncular nucleus',
'periacueductal gray',
'nucleus accumbens',
'optic chiasm',
'supraoptic decussation',
'optic tract',
'lateral lemniscus',
'epithalamus',
'mammillary nucleus',
'cochlear nuclei and nerve'
]
def read_intensity_data(filename):
int_data = {}
fp = open(filename, 'r')
lines = fp.readlines()
for line in lines:
splited = line.strip().split(',')
int_data[int(splited[0])] = float(splited[1])
return int_data
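# The intensity file is expected to hold one "label_id,intensity" pair per line,
# e.g. (hypothetical values):
#   1,52.4
#   2,47.0
#   38,61.3
# where the integer keys correspond to the 38 segment labels in LABEL_NAMES.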
def draw_scene(int_filename, color_mode=0, screen_name=None):
###############################################################################
# read polydata file
#
offscreen = False
draw_axes = False
segs = []
segs_mapper = []
segs_actor = []
transforms = []
transforms_filter = []
seg_fileformat = '/media/nebula/data/bah/seg2/seg%05d.vtk'
#seg_fileformat = '/media/nebula/data/bah/vtk/seg%05d.vtk'
#int_filename = '../matching_area/result.txt'
int_data = read_intensity_data(int_filename)
int_sum = sum(int_data)
int_data_sorted = {}
transform = vtk.vtkTransform()
transform.RotateWXYZ(90, 0, 1, 0)
transformFilter = vtk.vtkTransformPolyDataFilter()
transformFilter.SetTransform(transform)
for i in range(1, 39):
segs.append(vtk.vtkPolyDataReader())
segs[-1].SetFileName(seg_fileformat % i)
transforms.append(vtk.vtkTransform())
#transforms[-1].RotateWXYZ(90., 0, 1, 0)
transforms_filter.append(vtk.vtkTransformPolyDataFilter())
transforms_filter[-1].SetTransform(transforms[-1])
transforms_filter[-1].SetInputConnection(segs[-1].GetOutputPort())
transforms_filter[-1].Update()
segs_mapper.append(vtk.vtkPolyDataMapper())
#segs_mapper[-1].SetInputConnection(segs[-1].GetOutputPort())
segs_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
segs_actor.append(vtk.vtkActor())
segs_actor[-1].SetMapper(segs_mapper[-1])
segs_actor[-1].GetProperty().SetOpacity(0.1)
#color = cm.jet(i/39.)
#segs_actor[-1].GetProperty().SetColor(color[0], color[1], color[2])
i = 0
for k, v in sorted(int_data.items(), key=lambda x:x[1], reverse=True):
if i < 6:
segs_actor[k-1].GetProperty().SetOpacity(0.8)
if color_mode == 0:
color = ((v-40) / 20.)
segs_actor[k-1].GetProperty().SetColor(color, 0, 0)
elif color_mode == 1:
color = cm.jet( (v-float(int_sum/40)) / float(int_sum) * 10.)
segs_actor[k-1].GetProperty().SetColor(color[0], color[1], color[2])
print ' Rank %d : %s (%d) = %d' % (i+1, LABEL_NAMES[k-1], k, v)
#print color
i += 1
#int_data_sorted[k] = v
#print 'Lank %5d : %d (%d)' % (i, k, v)
###############################################################################
# draw axis
#
if draw_axes:
axesActor = vtk.vtkAxesActor()
###############################################################################
# prepare rendering
#
ren = vtk.vtkRenderer()
ren.SetBackground(0.0, 0.0, 0.0)
if draw_axes:
ren.AddActor(axesActor)
for seg in segs_actor:
ren.AddActor(seg)
renWin = vtk.vtkRenderWindow()
if offscreen:
renWin.SetOffScreenRendering(True)
renWin.AddRenderer(ren)
renWin.SetWindowName('Mouse Brain Viewer 2 + (%s)' % screen_name)
renWin.SetSize(1600, 1600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
get_screenshot(renWin, screen_name + '_1.png')
for trans in transforms:
trans.RotateWXYZ(90., 0, 1, 0)
for trans_filter in transforms_filter:
trans_filter.Update()
renWin.Render()
get_screenshot(renWin, screen_name + '_2.png')
for trans in transforms:
trans.RotateWXYZ(90., 0, 1, 0)
for trans_filter in transforms_filter:
trans_filter.Update()
renWin.Render()
get_screenshot(renWin, screen_name + '_3.png')
#iren.Start()
def get_screenshot(renWin, filename):
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(filename)
writer.SetInput(w2if.GetOutput())
writer.Write()
renWin.Render()
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
filename = ''
if(argc >= 2):
filename = argvs[1]
else:
filename = '../matching_area/result.txt'
if(argc >= 3):
color_mode = int(argvs[2])
else:
color_mode = 1
if(argc >= 4):
gene_name = argvs[3]
else:
gene_name = 'GENE_NAME'
print '************ %s ************' % gene_name
draw_scene(filename, color_mode, gene_name)
|
neuroinformatics/bah2015_registration
|
vtk_test/draw_intensity.py
|
Python
|
mit
| 5,528
|
[
"VTK"
] |
783f6480409e7dcd7577eedfb8a56d6e6f92480752222c3f52d8139a00438eb8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Syntax & semantics for type-hinting custom-functions/PTransforms in the SDK.
This module defines type-hinting objects and the corresponding syntax for
type-hinting function arguments, function return types, or PTransform object
themselves. TypeHint's defined in the module can be used to implement either
static or run-time type-checking in regular Python code.
Type-hints are defined by 'indexing' a type-parameter into a defined
CompositeTypeHint instance:
* 'List[int]'.
Valid type-hints are partitioned into two categories: simple, and composite.
Simple type hints are type hints based on a subset of Python primitive types:
int, bool, float, str, object, None, and bytes. No other primitive types are
allowed.
Composite type-hints are reserved for hinting the types of container-like
Python objects such as 'list'. Composite type-hints can be parameterized by an
inner simple or composite type-hint, using the 'indexing' syntax. In order to
avoid conflicting with the namespace of the built-in container types, when
specifying this category of type-hints, the first letter should be capitalized.
The following composite type-hints are permitted. NOTE: 'T' can be any of the
type-hints listed or a simple Python type:
* Any
* Union[T, T, T]
* Optional[T]
* Tuple[T, T]
* Tuple[T, ...]
* List[T]
* KV[T, T]
* Dict[T, T]
* Set[T]
* Iterable[T]
* Iterator[T]
* Generator[T]
Type-hints can be nested, allowing one to define type-hints for complex types:
  * 'List[Tuple[int, int, str]]'
In addition, type-hints can be used to implement run-time type-checking via the
'type_check' method on each TypeConstraint.
"""
import collections
import copy
import types
# A set of the built-in Python types we don't support, guiding the users
# to templated (upper-case) versions instead.
DISALLOWED_PRIMITIVE_TYPES = (list, set, tuple, dict)
class SimpleTypeHintError(TypeError):
pass
class CompositeTypeHintError(TypeError):
pass
class GetitemConstructor(type):
"""A metaclass that makes Cls[arg] an alias for Cls(arg)."""
def __getitem__(cls, arg):
return cls(arg)
class TypeConstraint(object):
"""The base-class for all created type-constraints defined below.
A TypeConstraint is the result of parameterizing a CompositeTypeHint with
  one of the allowed Python types or another CompositeTypeHint. It
binds and enforces a specific version of a generalized TypeHint.
"""
def _consistent_with_check_(self, sub):
"""Returns whether sub is consistent with self.
Has the same relationship to is_consistent_with() as
__subclasscheck__ does for issubclass().
Not meant to be called directly; call is_consistent_with(sub, self)
instead.
    Implementation may assume that 'sub' is not Any
and has been normalized.
"""
raise NotImplementedError
def type_check(self, instance):
"""Determines if the type of 'instance' satisfies this type constraint.
Args:
instance: An instance of a Python object.
Raises:
TypeError: The passed 'instance' doesn't satisfy this TypeConstraint.
Subclasses of TypeConstraint are free to raise any of the subclasses of
TypeError defined above, depending on the manner of the type hint error.
    All TypeConstraint sub-classes must define this method in order for the
class object to be created.
"""
raise NotImplementedError
def match_type_variables(self, unused_concrete_type):
return {}
def bind_type_variables(self, unused_bindings):
return self
def _inner_types(self):
"""Iterates over the inner types of the composite type."""
return []
def visit(self, visitor, visitor_arg):
"""Visitor method to visit all inner types of a composite type.
Args:
visitor: A callable invoked for all nodes in the type tree comprising
a composite type. The visitor will be called with the node visited
and the visitor argument specified here.
visitor_arg: Visitor callback second argument.
"""
visitor(self, visitor_arg)
for t in self._inner_types():
if isinstance(t, TypeConstraint):
t.visit(visitor, visitor_arg)
else:
visitor(t, visitor_arg)
def match_type_variables(type_constraint, concrete_type):
if isinstance(type_constraint, TypeConstraint):
return type_constraint.match_type_variables(concrete_type)
else:
return {}
def bind_type_variables(type_constraint, bindings):
if isinstance(type_constraint, TypeConstraint):
return type_constraint.bind_type_variables(bindings)
else:
return type_constraint
class SequenceTypeConstraint(TypeConstraint):
"""A common base-class for all sequence related type-constraint classes.
A sequence is defined as an arbitrary length homogeneous container type. Type
hints which fall under this category include: List[T], Set[T], Iterable[T],
and Tuple[T, ...].
Sub-classes may need to override '_consistent_with_check_' if a particular
sequence requires special handling with respect to type compatibility.
Attributes:
inner_type: The type which every element in the sequence should be an
instance of.
"""
def __init__(self, inner_type, sequence_type):
self.inner_type = inner_type
self._sequence_type = sequence_type
def __eq__(self, other):
return (isinstance(other, SequenceTypeConstraint)
and type(self) == type(other)
and self.inner_type == other.inner_type)
def __hash__(self):
return hash(self.inner_type) ^ 13 * hash(type(self))
def _inner_types(self):
yield self.inner_type
def _consistent_with_check_(self, sub):
return (isinstance(sub, self.__class__)
and is_consistent_with(sub.inner_type, self.inner_type))
def type_check(self, sequence_instance):
if not isinstance(sequence_instance, self._sequence_type):
raise CompositeTypeHintError(
"%s type-constraint violated. Valid object instance "
"must be of type '%s'. Instead, an instance of '%s' "
"was received."
% (self._sequence_type.__name__.title(),
self._sequence_type.__name__.lower(),
sequence_instance.__class__.__name__))
for index, elem in enumerate(sequence_instance):
try:
check_constraint(self.inner_type, elem)
except SimpleTypeHintError as e:
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of element #%s in '
'the passed %s is incorrect. Expected an instance of type %s, '
'instead received an instance of type %s.' %
(repr(self), index, _unified_repr(self._sequence_type),
_unified_repr(self.inner_type), elem.__class__.__name__))
except CompositeTypeHintError as e:
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of element #%s in '
'the passed %s is incorrect: %s'
% (repr(self), index, self._sequence_type.__name__, e))
def match_type_variables(self, concrete_type):
if isinstance(concrete_type, SequenceTypeConstraint):
return match_type_variables(self.inner_type, concrete_type.inner_type)
else:
return {}
def bind_type_variables(self, bindings):
bound_inner_type = bind_type_variables(self.inner_type, bindings)
if bound_inner_type == self.inner_type:
return self
else:
bound_self = copy.copy(self)
bound_self.inner_type = bound_inner_type
return bound_self
class CompositeTypeHint(object):
"""The base-class for all created type-hint classes defined below.
CompositeTypeHint's serve primarily as TypeConstraint factories. They are
only required to define a single method: '__getitem__' which should return a
parameterized TypeConstraint, that can be used to enforce static or run-time
type-checking.
'__getitem__' is used as a factory function in order to provide a familiar
API for defining type-hints. The ultimate result is that one will be able to
use: CompositeTypeHint[type_parameter] to create a type-hint object that
behaves like any other Python object. This allows one to create
'type-aliases' by assigning the returned type-hints to a variable.
* Example: 'Coordinates = List[Tuple[int, int]]'
"""
  def __getitem__(self, py_type):
"""Given a type creates a TypeConstraint instance parameterized by the type.
This function serves as a factory function which creates TypeConstraint
instances. Additionally, implementations by sub-classes should perform any
    sanity checking of the passed types in this method in order to rule out
    disallowed behavior, such as attempting to create a TypeConstraint whose
parameterized type is actually an object instance.
Args:
py_type: An instance of a Python type or TypeConstraint.
Returns: An instance of a custom TypeConstraint for this CompositeTypeHint.
Raises:
      TypeError: If the passed type violates any constraints for this particular
TypeHint.
"""
raise NotImplementedError
def validate_composite_type_param(type_param, error_msg_prefix):
"""Determines if an object is a valid type parameter to a CompositeTypeHint.
Implements sanity checking to disallow things like:
* List[1, 2, 3] or Dict[5].
Args:
type_param: An object instance.
error_msg_prefix: A string prefix used to format an error message in the
case of an exception.
Raises:
TypeError: If the passed 'type_param' is not a valid type parameter for a
CompositeTypeHint.
"""
# Must either be a TypeConstraint instance or a basic Python type.
is_not_type_constraint = (
not isinstance(type_param, (type, types.ClassType, TypeConstraint))
and type_param is not None)
is_forbidden_type = (isinstance(type_param, type) and
type_param in DISALLOWED_PRIMITIVE_TYPES)
if is_not_type_constraint or is_forbidden_type:
raise TypeError('%s must be a non-sequence, a type, or a TypeConstraint. %s'
' is an instance of %s.' % (error_msg_prefix, type_param,
type_param.__class__.__name__))
def _unified_repr(o):
"""Given an object return a qualified name for the object.
This function closely mirrors '__qualname__' which was introduced in
Python 3.3. It is used primarily to format types or object instances for
error messages.
Args:
o: An instance of a TypeConstraint or a type.
Returns:
A qualified name for the passed Python object fit for string formatting.
"""
return repr(o) if isinstance(
o, (TypeConstraint, types.NoneType)) else o.__name__
def check_constraint(type_constraint, object_instance):
"""Determine if the passed type instance satisfies the TypeConstraint.
When examining a candidate type for constraint satisfaction in
'type_check', all CompositeTypeHint's eventually call this function. This
function may end up being called recursively if the hinted type of a
CompositeTypeHint is another CompositeTypeHint.
Args:
type_constraint: An instance of a TypeConstraint or a built-in Python type.
object_instance: An object instance.
Raises:
SimpleTypeHintError: If 'type_constraint' is a one of the allowed primitive
Python types and 'object_instance' isn't an instance of this type.
CompositeTypeHintError: If 'type_constraint' is a TypeConstraint object and
'object_instance' does not satisfy its constraint.
"""
if type_constraint is None and object_instance is None:
return
elif isinstance(type_constraint, TypeConstraint):
type_constraint.type_check(object_instance)
elif type_constraint is None:
# TODO(robertwb): Fix uses of None for Any.
pass
elif not isinstance(type_constraint, type):
raise RuntimeError("bad type: %s" % (type_constraint,))
elif not isinstance(object_instance, type_constraint):
raise SimpleTypeHintError
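# Illustrative behaviour (the List composite hint referenced below is defined
# later in the module):
#   check_constraint(int, 3)               # returns None; constraint satisfied
#   check_constraint(int, 'a')             # raises SimpleTypeHintError
#   check_constraint(List[int], [1, 'a'])  # raises CompositeTypeHintError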
class AnyTypeConstraint(TypeConstraint):
"""An Any type-hint.
Any is intended to be used as a "don't care" when hinting the types of
function arguments or return types. All other TypeConstraint's are equivalent
to 'Any', and its 'type_check' method is a no-op.
"""
def __repr__(self):
return 'Any'
def type_check(self, instance):
pass
class TypeVariable(AnyTypeConstraint):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'TypeVariable[%s]' % self.name
def match_type_variables(self, concrete_type):
return {self: concrete_type}
def bind_type_variables(self, bindings):
return bindings.get(self, self)
class UnionHint(CompositeTypeHint):
"""A Union type-hint. Union[X, Y] accepts instances of type X OR type Y.
  Duplicate type parameters are ignored. Additionally, nested Union hints will
be flattened out. For example:
* Union[Union[str, int], bool] -> Union[str, int, bool]
A candidate type instance satisfies a UnionConstraint if it is an
instance of any of the parameterized 'union_types' for a Union.
Union[X] is disallowed, and all type parameters will be sanity checked to
ensure compatibility with nested type-hints.
When comparing two Union hints, ordering is enforced before comparison.
* Union[int, str] == Union[str, int]
"""
class UnionConstraint(TypeConstraint):
def __init__(self, union_types):
self.union_types = set(union_types)
def __eq__(self, other):
return (isinstance(other, UnionHint.UnionConstraint)
and self.union_types == other.union_types)
def __hash__(self):
return 1 + sum(hash(t) for t in self.union_types)
def __repr__(self):
# Sorting the type name strings simplifies unit tests.
return 'Union[%s]' % (', '.join(sorted(_unified_repr(t)
for t in self.union_types)))
def _inner_types(self):
for t in self.union_types:
yield t
def _consistent_with_check_(self, sub):
if isinstance(sub, UnionConstraint):
# A union type is compatible if every possible type is compatible.
# E.g. Union[A, B, C] > Union[A, B].
return all(is_consistent_with(elem, self)
for elem in sub.union_types)
else:
# Other must be compatible with at least one of this union's subtypes.
# E.g. Union[A, B, C] > T if T > A or T > B or T > C.
return any(is_consistent_with(sub, elem)
for elem in self.union_types)
def type_check(self, instance):
error_msg = ''
for t in self.union_types:
try:
check_constraint(t, instance)
return
except TypeError as e:
error_msg = str(e)
continue
raise CompositeTypeHintError(
'%s type-constraint violated. Expected an instance of one of: %s, '
'received %s instead.%s'
% (repr(self),
tuple(sorted(_unified_repr(t) for t in self.union_types)),
instance.__class__.__name__, error_msg))
def __getitem__(self, type_params):
if not isinstance(type_params, (collections.Sequence, set)):
raise TypeError('Cannot create Union without a sequence of types.')
# Flatten nested Unions and deduplicate repeated type hints.
params = set()
for t in type_params:
validate_composite_type_param(
t, error_msg_prefix='All parameters to a Union hint'
)
if isinstance(t, self.UnionConstraint):
params |= t.union_types
else:
params.add(t)
if Any in params:
return Any
elif len(params) == 1:
return next(iter(params))
else:
return self.UnionConstraint(params)
UnionConstraint = UnionHint.UnionConstraint
class OptionalHint(UnionHint):
"""An Option type-hint. Optional[X] accepts instances of X or None.
The Optional[X] factory function proxies to Union[X, None]
"""
def __getitem__(self, py_type):
# A single type must have been passed.
if isinstance(py_type, collections.Sequence):
raise TypeError('An Optional type-hint only accepts a single type '
'parameter.')
return Union[py_type, None]
class TupleHint(CompositeTypeHint):
"""A Tuple type-hint.
Tuple can accept 1 or more type-hint parameters.
Tuple[X, Y] represents a tuple of *exactly* two elements, with the first
being of type 'X' and the second an instance of type 'Y'.
* (1, 2) satisfies Tuple[int, int]
Additionally, one is able to type-hint an arbitrary length, homogeneous tuple
by passing the Ellipsis (...) object as the second parameter.
As an example, Tuple[str, ...] indicates a tuple of any length with each
element being an instance of 'str'.
"""
class TupleSequenceConstraint(SequenceTypeConstraint):
def __init__(self, type_param):
super(TupleHint.TupleSequenceConstraint, self).__init__(type_param,
tuple)
def __repr__(self):
return 'Tuple[%s, ...]' % _unified_repr(self.inner_type)
def _consistent_with_check_(self, sub):
if isinstance(sub, TupleConstraint):
# E.g. Tuple[A, B] < Tuple[C, ...] iff A < C and B < C.
return all(is_consistent_with(elem, self.inner_type)
for elem in sub.tuple_types)
else:
return super(TupleSequenceConstraint, self)._consistent_with_check_(sub)
class TupleConstraint(TypeConstraint):
def __init__(self, type_params):
self.tuple_types = tuple(type_params)
def __eq__(self, other):
return (isinstance(other, TupleHint.TupleConstraint)
and self.tuple_types == other.tuple_types)
def __hash__(self):
return hash(self.tuple_types)
def __repr__(self):
return 'Tuple[%s]' % (', '.join(_unified_repr(t)
for t in self.tuple_types))
def _inner_types(self):
for t in self.tuple_types:
yield t
def _consistent_with_check_(self, sub):
return (isinstance(sub, self.__class__)
and len(sub.tuple_types) == len(self.tuple_types)
and all(is_consistent_with(sub_elem, elem)
for sub_elem, elem
in zip(sub.tuple_types, self.tuple_types)))
def type_check(self, tuple_instance):
if not isinstance(tuple_instance, tuple):
raise CompositeTypeHintError(
"Tuple type constraint violated. Valid object instance must be of "
"type 'tuple'. Instead, an instance of '%s' was received."
% tuple_instance.__class__.__name__)
if len(tuple_instance) != len(self.tuple_types):
raise CompositeTypeHintError(
'Passed object instance is of the proper type, but differs in '
'length from the hinted type. Expected a tuple of length %s, '
'received a tuple of length %s.'
% (len(self.tuple_types), len(tuple_instance)))
for type_pos, (expected, actual) in enumerate(zip(self.tuple_types,
tuple_instance)):
try:
check_constraint(expected, actual)
continue
except SimpleTypeHintError:
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of element #%s in '
'the passed tuple is incorrect. Expected an instance of '
'type %s, instead received an instance of type %s.'
% (repr(self), type_pos, _unified_repr(expected),
actual.__class__.__name__))
except CompositeTypeHintError as e:
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of element #%s in '
'the passed tuple is incorrect. %s'
% (repr(self), type_pos, e))
def match_type_variables(self, concrete_type):
bindings = {}
if isinstance(concrete_type, TupleConstraint):
for a, b in zip(self.tuple_types, concrete_type.tuple_types):
bindings.update(match_type_variables(a, b))
return bindings
def bind_type_variables(self, bindings):
bound_tuple_types = tuple(
bind_type_variables(t, bindings) for t in self.tuple_types)
if bound_tuple_types == self.tuple_types:
return self
else:
return Tuple[bound_tuple_types]
def __getitem__(self, type_params):
ellipsis = False
if not isinstance(type_params, collections.Iterable):
# Special case for hinting tuples with arity-1.
type_params = (type_params,)
if type_params and type_params[-1] == Ellipsis:
if len(type_params) != 2:
raise TypeError('Ellipsis can only be used to type-hint an arbitrary '
'length tuple containing a single type: '
'Tuple[A, ...].')
# Tuple[A, ...] indicates an arbitrary length homogeneous tuple.
type_params = type_params[:1]
ellipsis = True
for t in type_params:
validate_composite_type_param(
t,
error_msg_prefix='All parameters to a Tuple hint'
)
if ellipsis:
return self.TupleSequenceConstraint(type_params[0])
else:
return self.TupleConstraint(type_params)
TupleConstraint = TupleHint.TupleConstraint
TupleSequenceConstraint = TupleHint.TupleSequenceConstraint
class ListHint(CompositeTypeHint):
"""A List type-hint.
List[X] represents an instance of a list populated by a single homogeneous
type. The parameterized type 'X' can either be a built-in Python type or an
instance of another TypeConstraint.
* ['1', '2', '3'] satisfies List[str]
"""
class ListConstraint(SequenceTypeConstraint):
def __init__(self, list_type):
super(ListHint.ListConstraint, self).__init__(list_type, list)
def __repr__(self):
return 'List[%s]' % _unified_repr(self.inner_type)
def __getitem__(self, t):
validate_composite_type_param(t, error_msg_prefix='Parameter to List hint')
return self.ListConstraint(t)
ListConstraint = ListHint.ListConstraint
class KVHint(CompositeTypeHint):
"""A KV type-hint, represents a Key-Value pair of a particular type.
Internally, KV[X, Y] proxies to Tuple[X, Y]. A KV type-hint accepts
exactly two type parameters. The first represents the required
key-type and the second the required value-type.
"""
def __getitem__(self, type_params):
if not isinstance(type_params, tuple):
raise TypeError('Parameter to KV type-hint must be a tuple of types: '
'KV[.., ..].')
if len(type_params) != 2:
raise TypeError(
'Length of parameters to a KV type-hint must be exactly 2. Passed '
'parameters: %s, have a length of %s.' %
(type_params, len(type_params))
)
return Tuple[type_params]
def key_value_types(kv):
"""Returns the key and value type of a KV type-hint.
Args:
kv: An instance of a TypeConstraint sub-class.
Returns:
A tuple: (key_type, value_type) if the passed type-hint is an instance of a
KV type-hint, and (Any, Any) otherwise.
"""
if isinstance(kv, TupleHint.TupleConstraint):
return kv.tuple_types
return Any, Any
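def _key_value_types_example():
  # A minimal usage sketch (not part of the original module): KV[str, int]
  # proxies to Tuple[str, int], so its key and value types unpack directly,
  # while any non-KV hint falls back to (Any, Any).
  assert key_value_types(KV[str, int]) == (str, int)
  assert key_value_types(Any) == (Any, Any)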
class DictHint(CompositeTypeHint):
"""A Dict type-hint.
Dict[K, V] represents a dictionary where all keys are of a particular type
and all values are of another (possibly the same) type.
"""
class DictConstraint(TypeConstraint):
def __init__(self, key_type, value_type):
self.key_type = key_type
self.value_type = value_type
def __repr__(self):
return 'Dict[%s, %s]' % (_unified_repr(self.key_type),
_unified_repr(self.value_type))
def __eq__(self, other):
return (type(self) == type(other)
and self.key_type == other.key_type
and self.value_type == other.value_type)
def __hash__(self):
return hash((type(self), self.key_type, self.value_type))
def _inner_types(self):
yield self.key_type
yield self.value_type
def _consistent_with_check_(self, sub):
return (isinstance(sub, self.__class__)
and is_consistent_with(sub.key_type, self.key_type)
and is_consistent_with(sub.value_type, self.value_type))
def _raise_hint_exception_or_inner_exception(self, is_key,
incorrect_instance,
inner_error_message=''):
incorrect_type = 'values' if not is_key else 'keys'
hinted_type = self.value_type if not is_key else self.key_type
if inner_error_message:
raise CompositeTypeHintError(
'%s hint %s-type constraint violated. All %s should be of type '
'%s. Instead: %s'
% (repr(self), incorrect_type[:-1], incorrect_type,
_unified_repr(hinted_type), inner_error_message)
)
else:
raise CompositeTypeHintError(
'%s hint %s-type constraint violated. All %s should be of '
'type %s. Instead, %s is of type %s.'
% (repr(self), incorrect_type[:-1], incorrect_type,
_unified_repr(hinted_type),
incorrect_instance, incorrect_instance.__class__.__name__)
)
def type_check(self, dict_instance):
if not isinstance(dict_instance, dict):
raise CompositeTypeHintError(
'Dict type-constraint violated. All passed instances must be of '
'type dict. %s is of type %s.'
% (dict_instance, dict_instance.__class__.__name__))
for key, value in dict_instance.iteritems():
try:
check_constraint(self.key_type, key)
except CompositeTypeHintError as e:
self._raise_hint_exception_or_inner_exception(True, key, str(e))
except SimpleTypeHintError:
self._raise_hint_exception_or_inner_exception(True, key)
try:
check_constraint(self.value_type, value)
except CompositeTypeHintError as e:
self._raise_hint_exception_or_inner_exception(False, value, str(e))
except SimpleTypeHintError:
self._raise_hint_exception_or_inner_exception(False, value)
def match_type_variables(self, concrete_type):
if isinstance(concrete_type, DictConstraint):
bindings = {}
bindings.update(
match_type_variables(self.key_type, concrete_type.key_type))
bindings.update(
match_type_variables(self.value_type, concrete_type.value_type))
return bindings
else:
return {}
def bind_type_variables(self, bindings):
bound_key_type = bind_type_variables(self.key_type, bindings)
bound_value_type = bind_type_variables(self.value_type, bindings)
if (bound_key_type, bound_value_type) == (self.key_type, self.value_type):
return self
else:
return Dict[bound_key_type, bound_value_type]
def __getitem__(self, type_params):
# Type param must be a (k, v) pair.
if not isinstance(type_params, tuple):
raise TypeError('Parameter to Dict type-hint must be a tuple of types: '
'Dict[.., ..].')
if len(type_params) != 2:
raise TypeError(
'Length of parameters to a Dict type-hint must be exactly 2. Passed '
'parameters: %s, have a length of %s.' %
(type_params, len(type_params))
)
key_type, value_type = type_params
validate_composite_type_param(
key_type,
error_msg_prefix='Key-type parameter to a Dict hint'
)
validate_composite_type_param(
value_type,
error_msg_prefix='Value-type parameter to a Dict hint'
)
return self.DictConstraint(key_type, value_type)
DictConstraint = DictHint.DictConstraint
class SetHint(CompositeTypeHint):
"""A Set type-hint.
Set[X] defines a type-hint for a set of homogeneous types. 'X' may be either a
built-in Python type or another nested TypeConstraint.
"""
class SetTypeConstraint(SequenceTypeConstraint):
def __init__(self, type_param):
super(SetHint.SetTypeConstraint, self).__init__(type_param, set)
def __repr__(self):
return 'Set[%s]' % _unified_repr(self.inner_type)
def __getitem__(self, type_param):
validate_composite_type_param(
type_param,
error_msg_prefix='Parameter to a Set hint'
)
return self.SetTypeConstraint(type_param)
SetTypeConstraint = SetHint.SetTypeConstraint
class IterableHint(CompositeTypeHint):
"""An Iterable type-hint.
Iterable[X] defines a type-hint for an object implementing an '__iter__'
method which yields objects which are all of the same type.
"""
class IterableTypeConstraint(SequenceTypeConstraint):
def __init__(self, iter_type):
super(IterableHint.IterableTypeConstraint, self).__init__(
iter_type, collections.Iterable)
def __repr__(self):
return 'Iterable[%s]' % _unified_repr(self.inner_type)
def _consistent_with_check_(self, sub):
if isinstance(sub, SequenceTypeConstraint):
return is_consistent_with(sub.inner_type, self.inner_type)
elif isinstance(sub, TupleConstraint):
if not sub.tuple_types:
# The empty tuple is consistent with Iterable[T] for any T.
return True
else:
# Each element in the heterogeneous tuple must be consistent with
# the iterator type.
# E.g. Tuple[A, B] < Iterable[C] if A < C and B < C.
return all(is_consistent_with(elem, self.inner_type)
for elem in sub.tuple_types)
else:
return False
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to an Iterable hint'
)
return self.IterableTypeConstraint(type_param)
IterableTypeConstraint = IterableHint.IterableTypeConstraint
class IteratorHint(CompositeTypeHint):
"""An Iterator type-hint.
Iterator[X] defines a type-hint for an object implementing both '__iter__'
and a 'next' method which yields objects which are all of the same type. Type
checking a type-hint of this type is deferred in order to avoid depleting the
underlying lazily generated sequence. See decorators.interleave_type_check for
further information.
"""
class IteratorTypeConstraint(TypeConstraint):
def __init__(self, t):
self.yielded_type = t
def __repr__(self):
return 'Iterator[%s]' % _unified_repr(self.yielded_type)
def _inner_types(self):
yield self.yielded_type
def _consistent_with_check_(self, sub):
return (isinstance(sub, self.__class__)
and is_consistent_with(sub.yielded_type, self.yielded_type))
def type_check(self, instance):
# Special case for lazy types, we only need to enforce the underlying
# type. This avoids having to compute the entirety of the generator/iter.
try:
check_constraint(self.yielded_type, instance)
return
except CompositeTypeHintError as e:
raise CompositeTypeHintError(
'%s hint type-constraint violated: %s' % (repr(self), str(e)))
except SimpleTypeHintError:
raise CompositeTypeHintError(
'%s hint type-constraint violated. Expected an iterator of type %s. '
'Instead received an iterator of type %s.'
% (repr(self), _unified_repr(self.yielded_type),
instance.__class__.__name__))
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to an Iterator hint'
)
return self.IteratorTypeConstraint(type_param)
IteratorTypeConstraint = IteratorHint.IteratorTypeConstraint
class WindowedTypeConstraint(TypeConstraint):
"""A type constraint for WindowedValue objects.
Mostly for internal use.
Attributes:
inner_type: The type which the element should be an instance of.
"""
__metaclass__ = GetitemConstructor
def __init__(self, inner_type):
self.inner_type = inner_type
def __eq__(self, other):
return (isinstance(other, WindowedTypeConstraint)
and self.inner_type == other.inner_type)
def __hash__(self):
return hash(self.inner_type) ^ 13 * hash(type(self))
def _inner_types(self):
yield self.inner_type
def _consistent_with_check_(self, sub):
return (isinstance(sub, self.__class__)
and is_consistent_with(sub.inner_type, self.inner_type))
def type_check(self, instance):
from apache_beam.transforms import window
if not isinstance(instance, window.WindowedValue):
raise CompositeTypeHintError(
"Window type-constraint violated. Valid object instance "
"must be of type 'WindowedValue'. Instead, an instance of '%s' "
"was received."
% (instance.__class__.__name__))
try:
check_constraint(self.inner_type, instance.value)
except (CompositeTypeHintError, SimpleTypeHintError):
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of the windowed value '
'is incorrect. Expected an instance of type %s, '
'instead received an instance of type %s.' %
(repr(self), _unified_repr(self.inner_type),
instance.value.__class__.__name__))
class GeneratorHint(IteratorHint):
pass
# Create the actual instances for all defined type-hints above.
Any = AnyTypeConstraint()
Union = UnionHint()
Optional = OptionalHint()
Tuple = TupleHint()
List = ListHint()
KV = KVHint()
Dict = DictHint()
Set = SetHint()
Iterable = IterableHint()
Iterator = IteratorHint()
Generator = GeneratorHint()
WindowedValue = WindowedTypeConstraint
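def _composite_hint_examples():
  # A minimal usage sketch (not part of the original module): the factory
  # instances above build parameterized constraints whose 'type_check' methods
  # accept matching values and raise CompositeTypeHintError otherwise.
  Union[int, str].type_check(3)                   # passes
  Tuple[int, str].type_check((1, 'a'))            # passes
  Dict[str, List[int]].type_check({'a': [1, 2]})  # passes
  KV[str, int].type_check(('key', 7))             # KV proxies to Tuple[str, int]
  Iterable[int].type_check([1, 2, 3])             # passes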
_KNOWN_PRIMITIVE_TYPES = {
dict: Dict[Any, Any],
list: List[Any],
tuple: Tuple[Any, ...],
set: Set[Any],
# Using None for the NoneType is a common convention.
None: type(None),
}
def normalize(x):
if x in _KNOWN_PRIMITIVE_TYPES:
return _KNOWN_PRIMITIVE_TYPES[x]
else:
return x
def is_consistent_with(sub, base):
"""Returns whether the type a is consistent with b.
This is accordig to the terminology of PEP 483/484. This relationship is
neither symmetric nor transitive, but a good mnemonic to keep in mind is that
is_consistent_with(a, b) is roughly equivalent to the issubclass(a, b)
relation, but also handles the special Any type as well as type
parameterization.
"""
if sub == base:
# Common special case.
return True
if isinstance(sub, AnyTypeConstraint) or isinstance(base, AnyTypeConstraint):
return True
sub = normalize(sub)
base = normalize(base)
if isinstance(base, TypeConstraint):
if isinstance(sub, UnionConstraint):
return all(is_consistent_with(c, base) for c in sub.union_types)
else:
return base._consistent_with_check_(sub)
elif isinstance(sub, TypeConstraint):
# Nothing but object lives above any type constraints.
return base == object
else:
return issubclass(sub, base)
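def _consistency_examples():
  # A minimal usage sketch (not part of the original module): is_consistent_with
  # handles Any, unions and parameterized containers.
  assert is_consistent_with(int, Any)
  assert is_consistent_with(List[int], Iterable[int])
  assert is_consistent_with(Tuple[int, int], Iterable[int])
  assert is_consistent_with(int, Union[int, str])
  assert not is_consistent_with(List[str], List[int])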
|
jasonkuster/incubator-beam
|
sdks/python/apache_beam/typehints/typehints.py
|
Python
|
apache-2.0
| 35,929
|
[
"VisIt"
] |
90e1131fa6dc4bb4c1dddfb6f885a483178b2f1fad50ef53088c7aa35e2c4091
|
import numpy as np
import util
from datetime import datetime
from scipy.stats import norm
import better_exceptions
from scipy.stats import multivariate_normal as mvn
class NaiveBayers(object):
def __init__(self):
# Per-class Gaussian parameters (mean and covariance)
self.gaussians = dict()
# Class priors
self.priors = dict()
def fit(self, X, Y, smoothing=10e-3):
N, D = X.shape
# The labels are the digits 0-9
labels = set(Y)
for c in labels:
# Select the rows of X that belong to class c
current_x = X[Y == c]
# Compute mean and variance. Store in the dictionary by class key
self.gaussians[c] = {
'mean': current_x.mean(axis=0),
'cov': np.cov(current_x.T) + np.eye(D)*smoothing,
}
# Class prior: fraction of the training samples that belong to class c
self.priors[c] = float(len(Y[Y == c])) / len(Y)
def score(self, X, Y):
# Get the predictions
P = self.predict(X)
# Return the classification accuracy (mean of correct predictions)
return np.mean(P == Y)
def predict(self, X):
# N - number of samples, D - number of features
N, D = X.shape
# Number of classes (10 digits)
K = len(self.gaussians)
# Log-posterior scores, initialized to zeros
P = np.zeros((N, K))
# for each class and mean/covariance
for c, g in self.gaussians.items():
mean, cov = g['mean'], g['cov']
log = np.log(self.priors[c])
# Calculate Log of the probability density function, all at once
P[:, c] = mvn.logpdf(X, mean=mean, cov=cov) + log
return np.argmax(P, axis=1)
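# A minimal synthetic-data sketch (not part of the original script): fit the
# Gaussian model on a toy two-class problem and check that it separates the
# classes without needing the MNIST data from util.
def toy_example():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 3.0])
    Y = np.array([0] * 50 + [1] * 50)
    model = NaiveBayers()
    model.fit(X, Y)
    return model.score(X, Y)  # close to 1.0 for well-separated classes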
if __name__ == '__main__':
# Get train data
X, Y = util.get_data(40000)
Ntrain = len(Y) // 2
Xtest, Ytest = util.get_test_data(40000)
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
# Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
model = NaiveBayers()
t0 = datetime.now()
model.fit(Xtrain, Ytrain)
print("Training time: ", (datetime.now() - t0))
t0 = datetime.now()
print("Training accuracy: ", model.score(Xtrain, Ytrain))
print("Time to compute train accuracy: ", (datetime.now() - t0), "Train size: ", len(Ytrain))
t0 = datetime.now()
print("Test accuracy: ", model.score(Xtest, Ytest))
print("Time to compute test accuracy: ", (datetime.now() - t0), "Test size: ", len(Ytest))
|
adexin/Python-Machine-Learning-Samples
|
Naive_bayes_mnist/nb_covariance.py
|
Python
|
mit
| 2,425
|
[
"Gaussian"
] |
f785d783acb2128b123318dac52775847c9d3e88b6334b27218a00de1d0aa9c5
|
"""Check that reading and writing masses in .con files is consistent."""
import tempfile
import os
import shutil
from numpy import asarray
import ase.lattice.compounds
import ase.data
import ase.io
# Error tolerance.
TOL = 1e-8
data = ase.lattice.compounds.B2(['Cs', 'Cl'], latticeconstant=4.123,
size=(3, 3, 3))
m_Cs = ase.data.atomic_masses[ase.data.atomic_numbers['Cs']]
m_Cl = ase.data.atomic_masses[ase.data.atomic_numbers['Cl']]
tempdir = tempfile.mkdtemp()
try:
con_file = os.path.join(tempdir, 'pos.con')
# Write and read the .con file.
ase.io.write(con_file, data, format='eon')
data2 = ase.io.read(con_file, format='eon')
# Check masses.
symbols = asarray(data2.get_chemical_symbols())
masses = asarray(data2.get_masses())
assert (abs(masses[symbols == 'Cs'] - m_Cs)).sum() < TOL
assert (abs(masses[symbols == 'Cl'] - m_Cl)).sum() < TOL
finally:
shutil.rmtree(tempdir)
|
misdoro/python-ase
|
ase/test/eon/eon_masses.py
|
Python
|
gpl-2.0
| 959
|
[
"ASE"
] |
868d2ae5d1a62ef44224aa3ab3b4053a12f477ef46350f46493d8bb7c4799e66
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_webhook
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Webhook Avi RESTful Object
description:
- This module is used to configure Webhook object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
callback_url:
description:
- Callback url for the webhook.
- Field introduced in 17.1.1.
description:
description:
- Field introduced in 17.1.1.
name:
description:
- The name of the webhook profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the webhook profile.
- Field introduced in 17.1.1.
verification_token:
description:
- Verification token sent back with the callback as query parameters.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Webhook object
avi_webhook:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_webhook
"""
RETURN = '''
obj:
description: Webhook (api/webhook) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
callback_url=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
verification_token=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'webhook',
set([]))
if __name__ == '__main__':
main()
|
fernandezcuesta/ansible
|
lib/ansible/modules/network/avi/avi_webhook.py
|
Python
|
gpl-3.0
| 3,761
|
[
"VisIt"
] |
6d999573cd83d665cf9ba5cc0c12bafb27e74626358d4f39d813293a29e844fa
|
#!/usr/bin/env python3
import argparse
import io
import zipfile
from collections import defaultdict
from datetime import date
oboInOwl = {
"SynonymTypeProperty": "synonym_type_property",
"hasAlternativeId": "has_alternative_id",
"hasBroadSynonym": "has_broad_synonym",
"hasDbXref": "database_cross_reference",
"hasExactSynonym": "has_exact_synonym",
"hasOBOFormatVersion": "has_obo_format_version",
"hasOBONamespace": "has_obo_namespace",
"hasRelatedSynonym": "has_related_synonym",
"hasScope": "has_scope",
"hasSynonymType": "has_synonym_type",
}
exact_synonym = "oboInOwl:hasExactSynonym"
related_synonym = "oboInOwl:hasRelatedSynonym"
broad_synonym = "oboInOwl:hasBroadSynonym"
predicates = {
"acronym": broad_synonym,
"anamorph": related_synonym,
"blast name": related_synonym,
"common name": exact_synonym,
"equivalent name": exact_synonym,
"genbank acronym": broad_synonym,
"genbank anamorph": related_synonym,
"genbank common name": exact_synonym,
"genbank synonym": related_synonym,
"in-part": related_synonym,
"misnomer": related_synonym,
"misspelling": related_synonym,
"synonym": related_synonym,
"scientific name": exact_synonym,
"teleomorph": related_synonym,
}
ranks = [
"class",
"cohort",
"family",
"forma",
"genus",
"infraclass",
"infraorder",
"kingdom",
"order",
"parvorder",
"phylum",
"section",
"series",
"species group",
"species subgroup",
"species",
"subclass",
"subcohort",
"subfamily",
"subgenus",
"subkingdom",
"suborder",
"subphylum",
"subsection",
"subspecies",
"subtribe",
"superclass",
"superfamily",
"superkingdom",
"superorder",
"superphylum",
"tribe",
"varietas",
]
nodes_fields = [
"tax_id", # node id in GenBank taxonomy database
"parent_tax_id", # parent node id in GenBank taxonomy database
"rank", # rank of this node (superkingdom, kingdom, ...)
"embl_code", # locus-name prefix; not unique
"division_id", # see division.dmp file
"inherited_div_flag", # (1 or 0) 1 if node inherits division from parent
"genetic_code_id", # see gencode.dmp file
"inherited_GC_flag", # (1 or 0) 1 if node inherits genetic code from parent
"mitochondrial_genetic_code_id", # see gencode.dmp file
"inherited_MGC_flag", # (1 or 0) 1 if node inherits mitochondrial gencode from parent
"GenBank_hidden_flag", # (1 or 0) 1 if name is suppressed in GenBank entry lineage
"hidden_subtree_root_flag", # (1 or 0) 1 if this subtree has no sequence data yet
"comments", # free-text comments and citations
]
def escape_literal(text):
return text.replace('"', '\\"')
def label_to_id(text):
return text.replace(" ", "_").replace("-", "_")
def convert_synonyms(tax_id, synonyms):
"""Given a tax_id and list of synonyms,
return a Turtle string asserting triples and OWL annotations on them."""
output = []
for synonym, unique, name_class in synonyms:
if name_class in predicates:
synonym = escape_literal(synonym)
predicate = predicates[name_class]
synonym_type = label_to_id(name_class)
output.append(
f"""
NCBITaxon:{tax_id} {predicate} "{synonym}"^^xsd:string .
[ a owl:Axiom
; owl:annotatedSource NCBITaxon:{tax_id}
; owl:annotatedProperty {predicate}
; owl:annotatedTarget "{synonym}"^^xsd:string
; oboInOwl:hasSynonymType ncbitaxon:{synonym_type}
] ."""
)
return output
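# Illustrative sketch (hypothetical tax_id and synonym, not part of the original
# script): each recognized synonym class yields one triple plus an OWL axiom
# annotation carrying the synonym type.
def _convert_synonyms_example():
    return convert_synonyms("9606", [["human", "", "genbank common name"]])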
def convert_node(node, label, merged, synonyms, citations):
"""Given a node dictionary, a label string, and lists for merged, synonyms, and citations,
return a Turtle string representing this tax_id."""
tax_id = node["tax_id"]
output = [f"NCBITaxon:{tax_id} a owl:Class"]
label = escape_literal(label)
output.append(f'; rdfs:label "{label}"^^xsd:string')
parent_tax_id = node["parent_tax_id"]
if parent_tax_id and parent_tax_id != "" and parent_tax_id != tax_id:
output.append(f"; rdfs:subClassOf NCBITaxon:{parent_tax_id}")
rank = node["rank"]
if rank and rank != "" and rank != "no rank":
if rank not in ranks:
print(f"WARN Unrecognized rank '{rank}'")
rank = label_to_id(rank)
# WARN: This is a special case for backward compatibility
if rank in ["species_group", "species_subgroup"]:
output.append(
f"; ncbitaxon:has_rank <http://purl.obolibrary.org/obo/NCBITaxon#_{rank}>"
)
else:
output.append(f"; ncbitaxon:has_rank NCBITaxon:{rank}")
gc_id = node["genetic_code_id"]
if gc_id:
output.append(f'; oboInOwl:hasDbXref "GC_ID:{gc_id}"^^xsd:string')
for merge in merged:
output.append(f'; oboInOwl:hasAlternativeId "NCBITaxon:{merge}"^^xsd:string')
for pubmed_id in citations:
output.append(f'; oboInOwl:hasDbXref "PMID:{pubmed_id}"^^xsd:string')
output.append('; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string')
output.append(".")
output += convert_synonyms(tax_id, synonyms)
return "\n".join(output)
def split_line(line):
"""Split a line from a .dmp file"""
return [x.strip() for x in line.split("\t|")]
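# Illustrative sketch (not part of the original script): names.dmp rows are
# tab-and-pipe delimited, so split_line recovers the stripped fields.
def _split_line_example():
    row = "9606\t|\tHomo sapiens\t|\t\t|\tscientific name\t|\n"
    return split_line(row)  # ['9606', 'Homo sapiens', '', 'scientific name', '']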
def convert(taxdmp_path, output_path, taxa=None):
"""Given the paths to the taxdmp.zip file and an output Turtle file,
and an optional set of tax_id strings to extract,
read from the taxdmp.zip file, collect annotations,
convert nodes to Turtle strings,
and write to the output file."""
scientific_names = defaultdict(list)
labels = {}
synonyms = defaultdict(list)
merged = defaultdict(list)
citations = defaultdict(list)
with open(output_path, "w") as output:
isodate = date.today().isoformat()
output.write(
f"""@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix obo: <http://purl.obolibrary.org/obo/> .
@prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> .
@prefix ncbitaxon: <http://purl.obolibrary.org/obo/ncbitaxon#> .
@prefix NCBITaxon: <http://purl.obolibrary.org/obo/NCBITaxon_> .
@prefix : <http://purl.obolibrary.org/obo/ncbitaxon.owl#> .
<http://purl.obolibrary.org/obo/ncbitaxon.owl> a owl:Ontology
; owl:versionIRI <http://purl.obolibrary.org/obo/ncbitaxon/{isodate}/ncbitaxon.owl>
; rdfs:comment "Built by https://github.com/obophenotype/ncbitaxon"^^xsd:string
.
obo:IAO_0000115 a owl:AnnotationProperty
; rdfs:label "definition"^^xsd:string
.
ncbitaxon:has_rank a owl:AnnotationProperty
; obo:IAO_0000115 "A metadata relation between a class and its taxonomic rank (eg species, family)"^^xsd:string
; rdfs:label "has_rank"^^xsd:string
; rdfs:comment "This is an abstract class for use with the NCBI taxonomy to name the depth of the node within the tree. The link between the node term and the rank is only visible if you are using an obo 1.3 aware browser/editor; otherwise this can be ignored"^^xsd:string
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
)
for predicate, label in oboInOwl.items():
output.write(
f"""
oboInOwl:{predicate} a owl:AnnotationProperty
; rdfs:label "{label}"^^xsd:string
.
"""
)
for label, parent in predicates.items():
predicate = label_to_id(label)
parent = parent.replace("oboInOwl", "oio")
output.write(
f"""
ncbitaxon:{predicate} a owl:AnnotationProperty
; rdfs:label "{label}"^^xsd:string
; oboInOwl:hasScope "{parent}"^^xsd:string
; rdfs:subPropertyOf oboInOwl:SynonymTypeProperty
.
"""
)
with zipfile.ZipFile(taxdmp_path) as taxdmp:
with taxdmp.open("names.dmp") as dmp:
for line in io.TextIOWrapper(dmp):
tax_id, name, unique, name_class, _ = split_line(line)
if name_class == "scientific name":
labels[tax_id] = name
scientific_names[name].append([tax_id, unique])
else:
synonyms[tax_id].append([name, unique, name_class])
# use unique name only if there's a conflict
for name, values in scientific_names.items():
tax_ids = [x[0] for x in values]
if len(tax_ids) > 1:
uniques = [x[1] for x in values]
if len(tax_ids) != len(set(uniques)):
print("WARN: Duplicate unique names", tax_ids, uniques)
for tax_id, unique in values:
labels[tax_id] = unique
synonyms[tax_id].append([name, unique, "scientific name"])
with taxdmp.open("merged.dmp") as dmp:
for line in io.TextIOWrapper(dmp):
old_tax_id, new_tax_id, _ = split_line(line)
merged[new_tax_id].append(old_tax_id)
with taxdmp.open("citations.dmp") as dmp:
for line in io.TextIOWrapper(dmp):
(
cit_id,
cit_key,
pubmed_id,
medline_id,
url,
text,
tax_id_list,
_,
) = split_line(line)
# WARN: the pubmed_id is always "0", we treat medline_id as pubmed_id
if medline_id == "0":
continue
for tax_id in tax_id_list.split():
if taxa and tax_id not in taxa:
continue
citations[tax_id].append(medline_id)
with taxdmp.open("nodes.dmp") as dmp:
for line in io.TextIOWrapper(dmp):
node = {}
fields = split_line(line)
for i in range(0, min(len(fields), len(nodes_fields))):
node[nodes_fields[i]] = fields[i]
tax_id = node["tax_id"]
if taxa and tax_id not in taxa:
continue
result = convert_node(
node,
labels[tax_id],
merged[tax_id],
synonyms[tax_id],
citations[tax_id],
)
output.write(result)
# TODO: delnodes
output.write(
"""
<http://purl.obolibrary.org/obo/NCBITaxon#_taxonomic_rank> a owl:Class
; rdfs:label "taxonomic rank"^^xsd:string
; rdfs:comment "This is an abstract class for use with the NCBI taxonomy to name the depth of the node within the tree. The link between the node term and the rank is only visible if you are using an obo 1.3 aware browser/editor; otherwise this can be ignored."^^xsd:string
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
)
for label in ranks:
rank = label_to_id(label)
if rank in ["species_group", "species_subgroup"]:
iri = f"<http://purl.obolibrary.org/obo/NCBITaxon#_{rank}>"
else:
iri = f"NCBITaxon:{rank}"
output.write(
f"""
{iri} a owl:Class
; rdfs:label "{label}"^^xsd:string
; rdfs:subClassOf <http://purl.obolibrary.org/obo/NCBITaxon#_taxonomic_rank>
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
)
def main():
parser = argparse.ArgumentParser(
description="Convert NCBI Taxonomy taxdmp.zip to Turtle format"
)
parser.add_argument("taxdmp", type=str, help="The taxdmp.zip file to read")
parser.add_argument("taxa", type=str, nargs="?", help="A list of taxa to build")
# TODO: upper, lower
parser.add_argument("turtle", type=str, help="The output Turtle file to write")
args = parser.parse_args()
taxa = None
if args.taxa:
taxa = set()
with open(args.taxa) as taxalist:
for line in taxalist:
taxa.add(line.split()[0])
convert(args.taxdmp, args.turtle, taxa)
if __name__ == "__main__":
main()
|
obophenotype/ncbitaxon
|
src/ncbitaxon.py
|
Python
|
bsd-3-clause
| 12,475
|
[
"BLAST"
] |
6150c014a6ee6362518c025ef996ec42563f9e973ca63052c45af4cf4cd06b30
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Design of Primers for Functional Genes
# <markdowncell>
# Author: Teotonio Soares de Carvalho
# <markdowncell>
# Please contact me at teotonio@msu.edu if you have any suggestions or questions.
# <headingcell level=2>
# Setup for Parallel Processing
# <codecell>
# Requires an IPython cluster running with at least one engine.
# An error will be raised if no cluster is found.
try:
import os
from IPython.parallel import Client
rc = Client()
cwd = os.getcwd()
# Make sure that the workers are operating on the same working directory.
rc[:].execute('import os;os.chdir(%s)'%cwd)
dview = rc[:]
dview.block = False
lview = rc.load_balanced_view()
except:
raise Exception("Please, start an IPython cluster to proceed.")
# <codecell>
def wait_on(ar, verbose = True):
"""
Tracks the progress of a task running on an IPython cluster. Downloaded from the internet.
"""
from datetime import datetime
N = len(ar.msg_ids)
rc = ar._client
submitted = rc.metadata[ar.msg_ids[0]]['submitted']
while not ar.ready():
ar.wait(1)
msgs = [msg_id not in rc.outstanding for msg_id in ar.msg_ids]
progress = sum(msgs)
dt = (datetime.now()-submitted).total_seconds()
if verbose:
clear_output()
print "%3i/%3i tasks finished after %4i s" % (progress, N, dt),
sys.stdout.flush()
if verbose:
print
print "done"
# <headingcell level=3>
# Import modules
# <codecell>
import time, os, shutil, math, sys, copy, random, time
import regex # Make sure you have installed this module. Use: pip install regex
import pandas # Make sure you have installed this module as well and its dependencies.
import numpy as np # If you installed pandas successfully, this package is already installed
from selenium import webdriver # Selenium and webdriver are installed separately
from selenium.webdriver.support.select import Select
from Bio.Seq import Seq # This also have to be installed: pip install Biopython
from Bio import SeqIO, AlignIO
from Bio.SeqRecord import SeqRecord
from IPython.core.display import clear_output
from itertools import combinations, permutations, izip, product, izip_longest
import datetime as dt
from functools import partial
import primer3
from IPython.core.display import clear_output
import commands
with dview.sync_imports():
from functools import partial
import numpy
import primer3
from Bio import SeqIO, Seq
from string import Template
import regex
import os
from Bio.Seq import Seq
import pandas
from itertools import combinations, izip, product, permutations
# <headingcell level=2>
# Download Archaea amoA Sequences from Fungene
# <codecell>
def load_driver(gene_url):
"""
Loads the fungene web page in chrome and assigns it to the global variable "driver".
I could not use the module ghost, which is faster, because the repository and the
analysis page in fungene are in separate tabs.
Requires webdriver, chrome, and selenium.
"""
global driver
if "driver" in dir():
driver.quit()
driver = webdriver.Chrome()
driver.get(gene_url);
driver.find_element_by_partial_link_text("Display Options").click()
seq = Select(driver.find_element_by_id("seqsPerPage"))
seq.select_by_value("2000")
driver.find_element_by_id("displayCmd").submit()
# <codecell>
def filter_by_score(score):
"""
Sets the given value as the minimum score filter in the repository page.
"""
form = driver.find_element_by_name("hmmId")
driver.find_element_by_link_text("Show/Hide filter options").click()
min_score = driver.find_element_by_name("min_bits")
min_score.send_keys(str(score))
form.submit()
# <codecell>
def find_last_page():
"""
Detects the total number of pages using a regular expression.
"""
pages = regex.findall(r"page=([0-9]*)", driver.page_source)
if not pages:
# Added to correct an error when there is only one page
last_page = 1
else:
last_page = max([int(page) for page in pages])
return last_page
# <codecell>
def load_page_repository(page_number, gene_url):
"""
Loads a page, specified by "page_number", for the archaeal amoA
from the fungene repository and selects all the sequences in it.
Arguments:
- page_number: an integer specifying the desired page.
"""
url = gene_url + "&page=%d"%page_number
driver.get(url)
driver.find_element_by_link_text("Select Entire Page").click()
get_nucl2prot_accession()
# <codecell>
def remove_fungene(download_path):
"""
Removes files previously download from fungene in the default
download directory used by chrome.
Arguments:
- download_path: a string indicating the default download folder.
"""
for file in os.listdir(download_path):
if regex.match(r"fungene.*?aligned_(nucleotide)*(protein)*_seqs", file):
os.remove(download_path + file)
# <codecell>
def get_nucl2prot_accession():
"""
Extracts the respective acc for protein and nucleotide for each sequence
and saves the results in a file "./data/nucleotide2protein" where each
line is as follow:
acc for protein | acc for nucleotide
"""
reg_exp = regex.compile(r"gpprotdata.jsp\?seqAccno=([0-9A-Z]+).+?"
"gbnucdata.jsp\?seqAccno=([0-9A-Z]+)",
regex.DOTALL|regex.MULTILINE|regex.VERBOSE)
accession = reg_exp.findall(driver.page_source)
accession = "\n".join(["|".join(pair) for pair in accession])
with open("./data/download/nucleotide2protein", "a") as handle:
handle.write(accession + "\n")
# <codecell>
def download_hmm(default_download_path):
"""
Download the hmm for AOA and copy it to ./data/
Arguments:
- download path: string indicating the default download folder
"""
for file in os.listdir(default_download_path):
if ".hmm" in file:
os.remove(default_download_path + file)
driver.find_element_by_link_text("(download HMM)").click()
time.sleep(1)
while True:
files = os.listdir(default_download_path)
file = [file for file in files if ".hmm" in file]
if file:
file = file[0]
break
time.sleep(2)
shutil.copy(default_download_path + file, "./data/download/" + file)
# <codecell>
def switch_to_analysis_window():
"""
Switches from the repository window to the analysis window.
Fails if the analysis window is not already open.
"""
driver.switch_to.window(driver.window_handles[1])
# <codecell>
def switch_to_repository_window():
"""
Switches back to the repository window.
"""
driver.switch_to.window(driver.window_handles[0])
# <codecell>
def deselect_all_sequences():
"""
Deselect sequences already downloaded in the repository page.
"""
try:
driver.find_element_by_link_text("Deselect All Sequences").click()
except:
pass
# <codecell>
def download_sequences(default_download_path, count):
"""
Initiate the "Begin Analysis" link for the fungene repository to
download the sequences. Downloads the unaligned nucleotides.
"""
driver.find_element_by_link_text("Begin Analysis").click()
switch_to_analysis_window()
for seq_type in ["Nucleotide", "Protein"]:
driver.find_element_by_id("download_%s_seqs"%seq_type).click()
# Uncheck the aligned option if checked.
aligned_option = driver.find_element_by_id("aligned1")
if aligned_option.is_selected():
aligned_option.click()
# Download the sequences
driver.find_element_by_name("download").click()
# If the internet is slow it is better to increase this time
time.sleep(2)
move_file_to_data(default_download_path, count, seq_type)
switch_to_repository_window()
deselect_all_sequences()
# <codecell>
def move_file_to_data(default_download_path, count, seq_type):
exp = r"fungene.*?aligned_%s_seqs"%seq_type.lower()
files = os.listdir(default_download_path)
file = [file for file in files if regex.match(exp, file)][0]
with open(default_download_path + file, "r") as handle:
fasta = handle.read()
os.remove(default_download_path + file)
file_name = "./data/download/%s_%d"%(seq_type.lower(), count)
with open(file_name, "w") as handle:
handle.write(fasta)
# <codecell>
def gather_all_fasta(protein = False):
"""
Gathers all downloaded unaligned sequences of the requested type into one fasta
file at "./data/download/all_nucleotide" or "./data/download/all_protein".
Arguments:
- protein: if True, gather the protein sequences; otherwise the nucleotide sequences.
"""
if protein:
seq_type = "protein"
else:
seq_type = "nucleotide"
with open("./data/download/all_%s"%seq_type, "w") as handle:
for file in os.listdir("./data/download/"):
if regex.match(r"%s_[0-9]+"%seq_type, file):
with open("./data/download/" + file, "r") as handle_split:
handle.write(handle_split.read())
# <codecell>
def main_download(hmm_id, default_download_path, score):
"""
Downloads the proteins, nucleotides, hmm file and the correspondence between accession numbers from proteins
and nucleotides.
Arguments:
-hmm_id: an integer giving the fungene hmm_id for a particular gene. You can get it by clicking on the gene link
in the fungene database. In the url that appears in the address bar you will see the hmm_id.
-default_download_path: string giving the default path for download for chrome.
-score: an integer giving the minimum hmm score for the sequences. Be careful with this parameter as it is very
gene dependent. That is why there is no default value.
This function uses chromedriver to automate the download. While the browser is working to download the files you should
not interact with it or unexpected results may arise. It usually takes about 2 minutes to complete the download on a
good network. If you notice that any of the download fails, remove the data folder, restart the notebook and run again.
"""
# Remove previous files to avoid errors
if os.path.isdir("./data/"):
shutil.rmtree("./data/")
os.makedirs("./data/")
os.makedirs("./data/download/")
gene_url = "http://fungene.cme.msu.edu/hmm_details.spr?hmm_id=%d"%hmm_id
remove_fungene(default_download_path)
load_driver(gene_url)
download_hmm(default_download_path)
filter_by_score(score)
last_page = find_last_page()
deselect_all_sequences()
count = 1
for page_number in range(1, last_page + 1):
load_page_repository(page_number, gene_url)
# Download the sequences when repository page is multiple
# of 5 or the last page
if not page_number % 5 or page_number == last_page:
download_sequences(default_download_path, count)
time.sleep(1)
count += 1
gather_all_fasta()
gather_all_fasta(protein = True)
# <headingcell level=2>
# Align Protein
# <codecell>
def read_fasta(filename):
"""
Reads a fasta file and returns it as a list of records.
Arguments:
- filename: string indicating the name of the file
including the path.
Returns a list of records formatted by BioPython.
"""
with open(filename, "rU") as handle:
records = list(SeqIO.parse(handle, "fasta"))
return records
def write_fasta(filename, records_list):
"""
Takes a list of records (BioPython) and writes it to filename.
"""
with open(filename, "w") as handle:
fasta_writer = SeqIO.FastaIO.FastaWriter(handle)
fasta_writer.write_file(records_list)
def make_dict_records(fasta_file_name):
"""
Returns a dict of records where the keys are the accession
numbers and the values are the records (Biopython).
"""
records = read_fasta(fasta_file_name)
records_dict = {record.name:record for record in records}
return records_dict
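# <codecell>
# A minimal round-trip sketch (hypothetical file name, not part of the original
# notebook; assumes ./data/ already exists): write two records with the helpers
# above and read them back keyed by accession.
def fasta_roundtrip_example(file_name = "./data/_roundtrip_example.fasta"):
    records = [SeqRecord(Seq("ATGAAATTT"), id = "seq_1", description = ""),
               SeqRecord(Seq("ATGCCCGGG"), id = "seq_2", description = "")]
    write_fasta(file_name, records)
    return make_dict_records(file_name)  # {'seq_1': ..., 'seq_2': ...}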
# <codecell>
def align_protein():
"""
Takes the unaligned proteins in ./data/download and aligns them using the
hmm profile.
"""
file = [file for file in os.listdir("./data/download/") if ".hmm" in file][0]
cmd = ("hmmalign "
"./data/download/%s "
"./data/download/all_protein "
"> ./data/download/aligned_prot ")%file
print "Aligning sequences"
process = os.system(cmd)
if process:
print cmd
raise
print "Reading Alignment"
alignment = AlignIO.read(open("./data/download/aligned_prot"), "stockholm")
print "Writing Alignment"
write_fasta("./data/download/aligned_prot", alignment)
sys.stdout.flush()
# <headingcell level=2>
# Align Nucleotides Using Proteins
# <codecell>
def get_nuc2prot():
"""
Returns a dict of nucleotide accession numbers as keys and
protein accession numbers as values.
"""
nuc2prot_acc = {}
with open("./data/download/nucleotide2protein", "r") as handle:
line = handle.readline()
while line:
prot, nuc = line.split("|")
nuc2prot_acc[nuc[:-1]] = prot
line = handle.readline()
return nuc2prot_acc
# <codecell>
def align_nucleotides():
"""
Takes the protein alignment positions and aligns the nucleotide codons
based on that.
Raises an error if the reading frame is not correct.
"""
nucleotides = make_dict_records("./data/download/all_nucleotide")
proteins = make_dict_records("./data/download/aligned_prot")
nuc2prot_acc = get_nuc2prot()
aligned_nucleotides = []
for nuc_acc, nucleotide in nucleotides.iteritems():
protein = proteins[nuc2prot_acc[nuc_acc]]
prot_unaligned = regex.sub(r"[-\.]", "", str(protein.seq))
prot_transl = str(nucleotide.seq.translate())
#Checking if frames are right
nuc_seq = str(nucleotide.seq)
protein_seq = str(protein.seq)
codons = [nuc_seq[i: i+3] for i in xrange(0, len(nuc_seq), 3)]
aligned_seq = []
for position in protein_seq:
if position == "." or position == "-":
aligned_seq += ["---"]
else:
if codons:
codon = codons.pop(0)
aligned_seq += codon
else:
aligned_seq += ["---"]
nucleotide.seq = Seq("".join(aligned_seq))
aligned_nucleotides += [nucleotide]
if not prot_transl.upper()[1:-1] in prot_unaligned.upper():
print "Incorrect frame!"
raise
write_fasta("./data/download/aligned_nucleotides", aligned_nucleotides)
# <headingcell level=2>
# Trimm Alignment
# <codecell>
def count_invalid_pos(file_name = None, records = None):
"""
Takes an alignment and counts the number of gaps "." or "-".
Arguments:
- file_name: if given, the function reads the fasta file.
- records: a list of records from an alignment (optional)
Exactly one of these two arguments must be provided.
"""
if not records:
records = read_fasta(file_name)
nseq = len(records)
seqs = [str(record.seq.upper()) for record in records]
align_pos = izip(*seqs)
count_pos = {}
for count, bases in enumerate(align_pos):
count_pos[count] = len(regex.findall(r"[-\.]", "".join(bases)))
count = nseq - pandas.Series(count_pos)
return (count, nseq)
# <codecell>
def trimm_columns(records, prop_non_gap = .001, column_info_prop_discard = .3):
"""
Removes columns of the alignment at both ends where the proportion of
gaps is higher than 30%. It DOESN'T remove positions in the middle of the
alignment.
"""
count, nseq = count_invalid_pos(records = records)
drop_threshold = prop_non_gap * nseq
richer_pos = count[count / float(nseq) > column_info_prop_discard]
start = richer_pos.index[0]
end = richer_pos.index[-1]
count_richer_pos = count[start:end + 1]
positions_to_drop = list(count_richer_pos[count_richer_pos <= drop_threshold].index)
positions_to_keep = [i for i in range(start, end + 1)]
trimmed_records = []
for record in records:
seq = str(record.seq)
seq = "".join([seq[i] for i in positions_to_keep])
record.seq = Seq(seq)
trimmed_records += [record]
return trimmed_records
# <codecell>
def dealign_seq(in_file_name = "./data/trimmed_align",
out_file_name = "./data/unaligned_trimmed"):
"""
Removes gaps from sequences.
"""
records = read_fasta(in_file_name)
for record in records:
record.seq = Seq(regex.sub(r"[\.-]", "", str(record.seq))).upper()
record.description = ""
write_fasta(out_file_name, records)
# <codecell>
def trimm_records(max_prop_gap_ends = .1, column_info_prop_discard = .3):
"""
Removes positions of the alignment with less than a specified threshold of information (bases) and
discards sequences (after trimming the positions) with more than a threshold of their length as gaps at any
of their ends.
Arguments:
-column_info_prop_discard: minimal allowed proportion of information (bases) in the columns at both ends
of the alignment. The purpose is to delete columns at both ends for which information
is not available for most sequences.
-max_prop_gap_ends: maximal proportion of the length of a sequence (after trimming columns as above) that consists of
continuous gaps at either end of the sequence. The purpose is to remove sequences that are too
short and cannot be used to test the primers.
"""
records = read_fasta("./data/download/aligned_nucleotides")
trimmed_columns = trimm_columns(records, column_info_prop_discard = column_info_prop_discard)
npos = len(trimmed_columns[0].seq)
max_gap_ends = max_prop_gap_ends * npos
records_to_drop = 0
records_to_keep = []
dropped = 0
for count, record in enumerate(trimmed_columns):
seq = str(record.seq)
gaps = regex.search("(?P<start>^[-\.]*).+?(?P<end>[-\.]*$)", seq)
if (len(gaps["start"]) >= max_gap_ends) or (len(gaps["end"]) >= max_gap_ends):
dropped += 1
records_to_drop += 1
else:
records_to_keep += [record]
write_fasta("./data/trimmed_align", records_to_keep)
print "%d sequences had more gaps in their ends than the specified threshold and were deleted."%dropped
# <headingcell level=2>
# Remove redundancy at 100% Similarity
# <codecell>
def cluster_sequences(input_file, output_file, similarity, word_size):
"""
Clusters all sequences with cd-hit-est and writes two files:
- <output_file>.clstr with the membership of each cluster
- <output_file>, a fasta file with one representative sequence per cluster
"""
if os.path.isfile(output_file):
os.remove(output_file)
cmd = Template(
"cd-hit-est -i $input_file " # Input File
"-o $output_file " # Output File
"-c $similarity " # Threshold similarity
"-n $word_size " # Word size (see manual)
"-T 0 " # Use all processors
"-M 0") # Use all memory
cmd = cmd.substitute(input_file = input_file,
output_file = output_file,
similarity = similarity,
word_size = word_size)
print cmd
process = os.system(cmd)
if process:
print cmd
raise
# <headingcell level=2>
# Create fasta file for each cluster
# <markdowncell>
# This code was reused from previous versions. That is the reason for the fancy Arch_Group class
# <codecell>
def make_split_groups_out(file_name):
"""
Breaks the output of cd-hit and returns it as a list of
strings where each element corresponds to a group.
"""
with open(file_name, "r") as handle:
groups = handle.read()
groups_split = groups.split(">Cluster")[1:]
return groups_split
# <codecell>
class Arch_Group:
"""
Used to parse the results from cd-hit. An instance of Arch_Group is a group of
nucleotide sequences that are 97% similar.
The instance has the following properties:
- name: the name of the representative sequence
- representative(deprecated): same as above
- n_members: number of sequences in group
- members: a list of strings with members accession number
- nuc_numbers: list of integers giving the number of nucleotides for
each member.
- n_invalid(deprecated): number of non ACTG characters in the representative seq
- seq(deprecated): the seq of the representative.
Note:
The representative is not used in the subsequent analysis. Instead of using
a representative sequence, I use the consensus in the next steps. But the
groups will be identified by the representative acc number.
"""
def __init__(self,group, dict_fasta):
self.get_representative(group)
self.name = self.representative
self.get_members(group)
self.get_nuc_numbers(group)
self.n_members = len(self.members)
self.seq = dict_fasta[self.name].seq
self.get_n_invalid_bases_representative()
def __repr__(self):
return "Group: %s, %d sequence(s)"%(self.name, self.n_members)
def get_representative(self, group):
repr_id = regex.findall(r"\>([A-Z|0-9]*?[_0-9]*)\.\.\.\ \*", group)[0]
self.representative = repr_id
def get_members(self, group):
seqs_id = regex.findall(r"\>([A-Z|0-9]*?[_0-9]*)\.\.\.", group)
self.members = seqs_id
def get_nuc_numbers(self, group):
nuc_numbers = regex.findall(r"([0-9]{1,4})nt,", group)
nuc_numbers = [int(num) for num in nuc_numbers]
self.nuc_numbers = nuc_numbers
def get_sequences(self, records_dict):
self.sequences = [records_dict[id] for id in self.sequences_ids]
def get_n_invalid_bases_representative(self):
n_invalid = len(regex.findall(r"[^atgc]", str(self.seq)))
self.n_invalid = n_invalid
def get_consensus_record(self):
record = read_fasta("./data/consensus/%s.fasta"%self.name)
self.consensus_record = record
def get_members_record_list(self):
records = read_fasta("./data/groups/%s.fasta"%self.name)
self.members_record_list = records
# <codecell>
def make_dict_groups(group_file_name = "./data/cluster_97.clstr",
fasta_file_name = "./data/contigs_100"):
"""
Parses the cd-hit cluster file and returns a dict
where the keys are the group names and the values
are instances of Arch_Group.
"""
dict_fasta = make_dict_records(fasta_file_name)
split_groups_out = make_split_groups_out(group_file_name)
groups = [Arch_Group(group, dict_fasta) for group in split_groups_out]
groups = {group.name:group for group in groups}
return groups
# <codecell>
def make_fasta_for_groups(dict_groups, dict_fasta, path_name):
"""
Creates one fasta file for each groups containing the sequences
for that group.
Arguments:
- dict_groups: a dict of groups as returned by make_dict_groups
- dict_fasta: a dict of sequences
- path_name: the path where the fasta file for the grouped must be saved
"""
if os.path.isdir(path_name):
shutil.rmtree(path_name)
os.makedirs(path_name)
for group_name, group in dict_groups.iteritems():
records = [dict_fasta[member] for member in group.members]
write_fasta(path_name + group_name, records)
# <codecell>
def main_make_fasta_from_groups(group_name, fasta_file_name, path_name):
groups = make_dict_groups(group_name, fasta_file_name)
dict_fasta = make_dict_records(fasta_file_name)
make_fasta_for_groups(groups, dict_fasta, path_name)
# <headingcell level=2>
# Step 05. Make Consensus Sequences
# <codecell>
def make_consensus(group_name, aligned_path, consensus_path, plurality):
"""
Finds the consensus sequence for the aligned fasta file of a given group
located in aligned_path.
Arguments:
- group_name: string indicating the name of the group
- aligned_path: string indicating the path where the aligned sequences are.
- consensus_path: string indicating where the consensus are.
- plurality: the minimal proportion of agreement that must be at a given position for
the consensus to receive the most frequent base at that position.
Writes the consensus sequences to ./data/consensus
"""
n_seq = len(read_fasta(aligned_path + group_name))
min_agreement = int(plurality * n_seq)
cmd = Template("cons " # From emboss
"$aligned_path$group_name " # Input fasta
"$consensus_path$group_name " # Output fasta
"-name $group_name " # The consensus sequence is named with the group name
"-plurality $min_agreement " # See description of the function above
)
cmd = cmd.substitute(aligned_path = aligned_path,
consensus_path = consensus_path,
group_name = group_name,
min_agreement = min_agreement)
process = os.system(cmd)
if process:
raise RuntimeError('program {} failed!'.format(cmd))
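# <markdowncell>
# A minimal sketch of the EMBOSS `cons` command string built inside make_consensus,
# using hypothetical paths and group name, a plurality of 0.5 over 10 aligned
# sequences. The command is only printed here, not executed.
# <codecell>
example_cmd = Template("cons "
"$aligned_path$group_name "
"$consensus_path$group_name "
"-name $group_name "
"-plurality $min_agreement ")
print example_cmd.substitute(aligned_path = "./data/aligned/",  # hypothetical path
consensus_path = "./data/consensus/",  # hypothetical path
group_name = "GROUP_EXAMPLE.fasta",  # hypothetical group
min_agreement = int(0.5 * 10))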
# <codecell>
def gather_all_consensus(consensus_path, consensus_name):
"""
Arguments:
Gather the individual consensus file in a unique file.
- consensus_path: the path where the consensus file are.
- consensus_name: the name of the resulting consensus file.
"""
records = ""
for file in os.listdir(consensus_path):
with open(consensus_path + file, "r") as handle:
records += handle.read()
with open(consensus_name, "w") as handle:
handle.write(records)
# <codecell>
def main_parallel_consensus(consensus_path, aligned_path,
consensus_name, fasta_file_name,
group_file_name,
plurality):
"""
Run the previous functions in parallel.
Arguments:
- consensus_path: string indicating the path where the individual consensus are.
- aligned_path: the path were the individual aligments are.
- consensus_name: the name of the consensus file with all consensus sequences.
- fasta_file_name: the original sequences from which the consensus were made.
- group_file_name: the file indicating groups from cd-hit
- plurality: the minimal proportion of agreement that must be at a given position for
the consensus to receive the most frequent base at that position.
"""
if os.path.isdir(consensus_path):
shutil.rmtree(consensus_path)
os.makedirs(consensus_path)
groups = make_dict_groups(group_file_name = group_file_name,
fasta_file_name = fasta_file_name)
cmd_template = Template("cp $aligned_path$group_name $consensus_path$group_name")
for group in groups.values():
if group.n_members == 1:
cmd = cmd_template.substitute(consensus_path = consensus_path,
aligned_path = aligned_path,
group_name = group.name)
os.system(cmd)
# Clusters are only sent to work if the number of members > 1
clusters = [group_name for group_name, group in groups.iteritems()\
if group.n_members > 1]
# Import os in all engines (nodes)
dview.execute("import os")
# Send the make_consensus function to all engines
dview.push({"make_consensus":make_consensus,
"read_fasta":read_fasta})
# Map/Reduce
task = lview.map(partial(make_consensus,
aligned_path = aligned_path,
consensus_path = consensus_path,
plurality = plurality),
clusters)
wait_on(task)
if not task.successful():
raise Exception("Consensus failed!")
gather_all_consensus(consensus_path = consensus_path,
consensus_name = consensus_name)
# <headingcell level=3>
# Classify Sequences
# <codecell>
def get_seqs_id_from_pester():
"""
Extracts relevant information about sequences (name, full_name, acc,
Taxon_Level1 and Seq_Len) as given by Pester et al 2012 in the arb
database provided as supplementary material.
This function only applies for the analysis of Archaea amoA
"""
# Read data from the NDS file exported from arb
seq_all = pandas.read_table("./Pester Consensus Data/pester.nds",
header=None)
# Rename the columns
seq_all.columns = ["name", "full_name", "taxonomy", "acc",
"Taxon_Level_1", "Taxon_Level_2", "Taxon_Level_3",
"Seq_Len", "Habitat", "unknown"]
# Select rows from sequences in the tree.
selected_rows = seq_all.Taxon_Level_1.notnull()
# Select relevant columns for subsequent analysis
selected_columns = ["name", "Taxon_Level_1", "Taxon_Level_2", "Taxon_Level_3"]
seq_valid = seq_all.ix[selected_rows, selected_columns]
return seq_valid
# <headingcell level=3>
# Screen Oligos
# <codecell>
def enumerate_oligos(starts, kmer_sizes, seqs_all, look_ahead = 50):
"""
Enumerate all possible oligos (kmers) with sizes kmer_sizes from
the alignment.
"""
unique = {}
for start in starts:
# To account for gaps, I choose a region much bigger than kmer_size
seqs = [seq[start:start + look_ahead].replace("-", "") for seq in seqs_all]
for kmer_size in kmer_sizes:
oligos = [seq[:kmer_size] for seq in seqs]
for count, oligo in enumerate(oligos):
if oligo:
if not oligo in unique:
unique[oligo] = [count]
else:
unique[oligo] += [count]
unique_set = {oligo:set(accs) for oligo, accs in unique.iteritems()}
return unique_set
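# <markdowncell>
# A small self-contained sketch of the k-mer enumeration above on a toy alignment
# (three short gapped sequences, purely illustrative): it collects the unique 6-mers
# starting at alignment position 0 and the indices of the sequences carrying each one.
# <codecell>
toy_alignment = ["ATG-CGTACGTT",  # made-up sequences, for illustration only
"ATGCC-GTACGT",
"ATGCCGTTACGT"]
toy_kmers = enumerate_oligos(starts = [0],
kmer_sizes = [6],
seqs_all = toy_alignment,
look_ahead = 8)
print "Unique 6-mers at position 0: %s" % toy_kmers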
# <codecell>
def filter_oligos(oligos_dict, primer_conc,
hairpin_tm_max, homo_tm_max,
tm_max, tm_min, min_occurrence, no_3_T,
no_poly_3_GC, no_poly_run, max_degen,
mv_conc, dv_conc, rev):
"""
Removes oligos that don't have desirable properties.
"""
# Rescale concentration of primers
primer_conc_resc = primer_conc/float(max_degen)
# Remove oligos that occur less than min_occurrence
if type(oligos_dict.values()[0]) == int:
oligos = [oligo for oligo, value in oligos_dict.iteritems() if \
value >= min_occurrence]
else:
oligos = [oligo for oligo, value in oligos_dict.iteritems() if \
len(value) >= min_occurrence]
# It is necessary to use the reverse complement for the reverse primers
if rev:
oligos = [str(Seq(oligo).reverse_complement()) for oligo in oligos]
# Apply empirical rules
oligos = apply_rules(oligos, rev, no_3_T, no_poly_3_GC)
# Remove kmers with hairpin tm > threshold
calc_hairpin = partial(primer3.calcHairpinTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
hairpin = map(calc_hairpin, oligos)
oligos = [oligo for oligo, hp_tm in zip(oligos, hairpin) if hp_tm <= hairpin_tm_max]
# Remove kmers with homodimers tm > threshold
calc_homo_tm = partial(primer3.calcHomodimerTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
homo_tm = map(calc_homo_tm, oligos)
oligos = [oligo for oligo, homo_tm in zip(oligos, homo_tm) if homo_tm <= homo_tm_max]
# Remove kmers with poly runs
if no_poly_run:
polys = ["AAAA", "TTTT", "GGGG", "CCCC"]
find_poly = lambda x: not any([True for poly in polys if poly in x])
oligos = filter(find_poly, oligos)
# Remove primers with tm above threshold
calc_tm = partial(primer3.calcTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
tms = map(calc_tm, oligos)
# Keep only oligos whose Tm is within [tm_min, tm_max] (avoids removing items while iterating)
oligos = [oligo for oligo, tm in zip(oligos, tms) if tm_min <= tm <= tm_max]
# For compatibility with the dictionaries, return the rev primers in their original orientation
if rev:
oligos = [str(Seq(oligo).reverse_complement()) for oligo in oligos]
return oligos
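# <markdowncell>
# A minimal sketch of the thermodynamic checks used in filter_oligos on a single
# made-up oligo, using the primer3-py calls already used above. The concentration
# mirrors the rescaling used elsewhere in this notebook (200 nM split across up to
# 60 subprimers); the oligo itself is only illustrative.
# <codecell>
example_oligo = "ACGGGTGAGTAACACGTGG"  # made-up oligo
print "Tm: %.1f" % primer3.calcTm(example_oligo, dna_conc = 200/60.0, mv_conc = 50, dv_conc = 1.5)
print "Hairpin Tm: %.1f" % primer3.calcHairpinTm(example_oligo, dna_conc = 200/60.0, mv_conc = 50, dv_conc = 1.5)
print "Homodimer Tm: %.1f" % primer3.calcHomodimerTm(example_oligo, dna_conc = 200/60.0, mv_conc = 50, dv_conc = 1.5)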
# <codecell>
def apply_rules(oligos, rev, no_3_T, no_poly_3_GC):
"""
Apply some empirical rules for primer design.
"""
# The reverse oligos were already reverse-complemented upstream, so the 3' terminal end can be treated the same way for fwd and rev
if no_poly_3_GC:
find_poly_GC = lambda x: not any([True for poly in ["GGG", "CCC"] if poly in x[-3:]])
oligos = filter(find_poly_GC, oligos)
if no_3_T:
oligos = filter(lambda x: not x[-1] == "T", oligos)
return oligos
# <codecell>
def discard_redundant(oligo_series, max_degen, rev,
primer_conc, verbose, min_diff,
mv_conc, dv_conc):
"""
From reduntant oligos (oligos that detects exactly the same sequences) select those who have
highest melting temperature and keep the remaining in a dictionary for further reuse if the
representant is discarded for any reason.
"""
# Rescale primer concentration
primer_conc_resc = primer_conc/float(max_degen)
to_remove = []
if rev:
oligos = [str(Seq(oligo).reverse_complement()) for oligo in oligo_series.index]
else:
oligos = [oligo for oligo in oligo_series.index]
redundant = {}
calc_tm = partial(primer3.calcTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
tms = map(calc_tm, oligos)
tms = {oligo:tm for tm, oligo in zip(tms, oligo_series.index)}
for count, base_primer in enumerate(oligo_series.index):
if verbose:
clear_output()
print "Iteration %d of %d" % (count, len(oligo_series))
sys.stdout.flush()
if base_primer in to_remove:
continue
for primer in oligo_series.index:
if primer == base_primer:
continue
if primer in to_remove:
continue
union = oligo_series[base_primer].union(oligo_series[primer])
diff = len(union) - len(oligo_series[base_primer])
size_diff = len(oligo_series[base_primer]) - len(oligo_series[primer])
if numpy.abs(diff) <= min_diff and size_diff <= min_diff:
# If two oligos match the same sequences, keep the one with highest Tm
if tms[primer] < tms[base_primer]:
to_remove += [primer]
# Keep a record of the redundant oligos because they may be reused if the
# one chosen here is discarded later
if not base_primer in redundant:
redundant[base_primer] = [primer]
else:
redundant[base_primer] += [primer]
# If the primer that was removed contains other redundant primers
# add them to the dict as well in the key corresponding to the selected primer
if primer in redundant:
redundant[base_primer] += redundant[primer]
else:
to_remove += [base_primer]
if not primer in redundant:
redundant[primer] = [base_primer]
else:
redundant[primer] += [base_primer]
if base_primer in redundant:
redundant[primer] += redundant[base_primer]
break
to_keep = [index for index in oligo_series.index if not index in to_remove]
return {"oligo_series":oligo_series[to_keep], "redundant":redundant}
# <codecell>
def find_valid_positions(seqs_all, max_gap_prop):
"""
Find positions in the alignment that are not mostly gaps.
"""
positions = zip(*seqs_all)
count_gaps = lambda x: len([base for base in x if base == "-"])
gaps = map(count_gaps, positions)
max_gaps = len(seqs_all) * max_gap_prop
pos = range(len(positions))
valid_pos = filter(lambda x: x[1] <= max_gaps, zip(pos, gaps))
valid_pos = [p[0] for p in valid_pos]
return valid_pos
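# <markdowncell>
# A self-contained sketch of the gap filter above on a toy alignment: with
# max_gap_prop = 0.5, positions where more than half of the sequences have a gap
# are dropped. The sequences are made up for illustration.
# <codecell>
toy_seqs = ["AT-G",  # made-up aligned sequences
"A--G",
"ATCG"]
print "Valid positions: %s" % find_valid_positions(toy_seqs, max_gap_prop = 0.5)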
# <codecell>
def enumerate_positions_for_screen(fasta_file, kmer_sizes, step,
max_gap_prop):
"""
Calculates the positions where oligos should be enumerated based on the
step parameter.
"""
# Select valid columns
records = read_fasta(fasta_file)
n_seq = float(len(records))
seqs_all = [str(record.seq) for record in records]
valid_pos = find_valid_positions(seqs_all, max_gap_prop)
# Define positions where oligos will be enumerated
# (the last positions are excluded so that the smallest kmer still fits)
last_pos = valid_pos[-min(kmer_sizes)]
pos = [p for p in valid_pos if p < last_pos]
starts = [pos[i] for i in range(0, len(pos), step)]
return (starts, seqs_all, n_seq)
# <codecell>
def enumerate_kmers_screen(start, starts, seqs_all, n_seq, kmer_sizes,
hairpin_tm_max, homo_tm_max,
tm_max, tm_min, min_occurrence, no_3_T,
no_poly_3_GC, max_degen, no_poly_run,
primer_conc, min_diff, mv_conc, dv_conc, look_ahead):
"""
Apply the functions above to enumerate all possible oligos that match some criteria
specified in its arguments along a given alignemnt. This is used for screening positions
in the alignment that might be useful for designing primers.
"""
# Rescale primer concentration
primer_conc_resc = primer_conc/float(max_degen)
# After the middle of the alignment, primers will be tested as reverse
rev = False
if start > starts[len(starts) / 2]:
rev = True
unique_kmers = enumerate_oligos(starts = [start],
kmer_sizes = kmer_sizes,
seqs_all = seqs_all,
look_ahead = look_ahead)
kmers = filter_oligos(oligos_dict = unique_kmers,
rev = rev,
primer_conc = primer_conc,
hairpin_tm_max = hairpin_tm_max,
homo_tm_max = homo_tm_max,
tm_max = tm_max,
tm_min = tm_min,
min_occurrence = min_occurrence,
no_3_T = no_3_T,
no_poly_3_GC = no_poly_3_GC,
max_degen = max_degen,
no_poly_run = no_poly_run,
mv_conc = mv_conc,
dv_conc = dv_conc)
unique_kmers = {i:unique_kmers[i] for i in kmers}
kmers_series = pandas.Series(unique_kmers)
cover = kmers_series.map(len).sort(inplace = False, ascending = False)
kmers_series = kmers_series[cover.index]
not_redundant = discard_redundant(kmers_series,
max_degen = max_degen,
rev = rev,
primer_conc = primer_conc,
verbose = False,
min_diff = min_diff,
mv_conc = mv_conc,
dv_conc = dv_conc)
kmers_series = not_redundant["oligo_series"]
unique_kmers = {kmer:unique_kmers[kmer] for kmer in \
kmers_series.index}
# Select the n (max_degen) best oligos
sequences_detected = kmers_series.map(len)
sequences_detected.sort(ascending = False)
best_primers = sequences_detected.index[:max_degen]
detected = set()
for oligo in best_primers:
detected = detected.union(kmers_series[oligo])
coverage = len(detected) / n_seq
unique_kmers = {key:unique_kmers[key] for key in best_primers}
calc_tm = partial(primer3.calcTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
tm = map(calc_tm, unique_kmers.keys())
if len(tm):
tms_median = numpy.median(tm)
tms_10 = numpy.percentile(tm, 10)
tms_90 = numpy.percentile(tm, 90)
else:
tms_median, tms_10, tms_90 = (0, 0, 0)
# After this filtering, calculate the coverage and richness of the most
# abundant oligos
richness = len(unique_kmers)
return ((start, {"Richness":richness,
"Coverage":coverage,
"Median_Tm":tms_median,
"Tm_10":tms_10,
"Tm_90":tms_90}),
unique_kmers, tm)
# <codecell>
def screen_oligos(fasta_file, kmer_sizes, hairpin_tm_max = 35, homo_tm_max = 35,
tm_max = 65, tm_min = 50, min_occurrence = 5, no_3_T = True,
no_poly_3_GC = True, max_degen = 60, no_poly_run = True,
step = 3, primer_conc = 200, max_gap_prop = .1,
min_diff = 0, mv_conc = 50, dv_conc = 1.5, look_ahead = 50):
"""
Apply the function enumerate_kmers_screen in parallel for enumerating all possible oligos that match some criteria
specified in its arguments along a given alignment. This is used for screening positions
in the alignment that might be useful for designing primers.
Arguments:
-fasta_file: a string giving the name of the fasta file with aligned sequences to be used for enumeration of oligos;
-kmer_sizes: a list of integers with the desired sizes of oligos;
-hairpin_tm_max: maximal hairpin melting temperature allowed (in Celsius degrees);
-homo_tm_max: maximal homodimer melting temperature allowed (in Celsius degrees);
-tm_max: maximal melting temperature allowed for an oligo (in Celsius degrees);
-tm_min: minimal melting temperature allowed for an oligo (in Celsius degrees);
-min_occurrence: integer. Minimal allowed occurrence of an oligo across all sequences;
-no_3_T: boolean. Should oligos with a T at the 3' end be discarded?
-no_poly_3_GC: boolean. Should oligos with three G's or C's at the 3' end be discarded?
-max_degen: the maximal number of subprimers desired. This will also be used to rescale the oligo concentration.
-no_poly_run: boolean. Should oligos with runs of four or more of the same base be discarded?
-step: distance between positions in the alignment from which primers should be enumerated. A step of 1 implies that
all positions will be used.
-primer_conc: total primer concentration in nM. This concentration will be rescaled automatically by max_degen.
-max_gap_prop: float. Maximal proportion of gaps allowed for any position where oligos will be enumerated.
-min_diff: minimal difference in the sequences detected for an oligo to be considered redundant. This parameter should be
kept at its default value unless you have strong reasons to change it.
-mv_conc: monovalent ions concentration in mM.
-dv_conc: divalent ions concentration in mM.
-look_ahead: number of alignment columns read past each start position to compensate for gaps.
"""
starts, seqs_all, n_seq = enumerate_positions_for_screen(\
fasta_file,
kmer_sizes = kmer_sizes,
max_gap_prop = max_gap_prop,
step = step)
kwargs = {"starts":starts,
"seqs_all":seqs_all,
"n_seq":n_seq,
"kmer_sizes":kmer_sizes,
"hairpin_tm_max":hairpin_tm_max,
"homo_tm_max":homo_tm_max,
"tm_max":tm_max,
"tm_min":tm_min,
"min_occurrence":min_occurrence,
"no_3_T":no_3_T,
"no_poly_3_GC":no_poly_3_GC,
"max_degen":max_degen,
"no_poly_run":no_poly_run,
"primer_conc":primer_conc,
"min_diff":min_diff,
"mv_conc":mv_conc,
"dv_conc":dv_conc,
"look_ahead":look_ahead}
p = partial(enumerate_kmers_screen, **kwargs)
dview.push({"enumerate_kmers_screen":enumerate_kmers_screen,
"discard_redundant":discard_redundant,
"filter_oligos":filter_oligos,
"enumerate_oligos":enumerate_oligos,
"apply_rules":apply_rules})
task = lview.map(p, starts, chunksize = 20)
wait_on(task)
pos = {res[0][0]:res[0][1] for res in task.result}
unique_kmers = [(res[0][0], res[1]) for res in task.result]
data = pandas.DataFrame(pos).T
data["Pos"] = data.index
return {"data":data, "unique_kmers":unique_kmers}
# <codecell>
def combine_positions(positions):
"""
Combines the sets of sequences detected at the given positions.
Note: relies on a dict `unique_sets` (position -> set of sequence indices)
defined in the calling scope.
"""
combined = []
for pos in positions:
combined += unique_sets[pos]
return set(combined)
# <headingcell level=3>
# Enumerate oligos using specified positions
# <markdowncell>
# Some of the functions used in this section were defined in the previous section
# <codecell>
def unite_pairs(pair):
set_pair = fwd_unique[pair[0]].intersection(rev_unique[pair[1]])
return (pair, set_pair)
# <codecell>
def enumerate_pairs(fwd_unique, rev_unique):
"""
Combine fwd and rev primers as pairs and return it as a dict where the key is the pair itself
and the values are the set of sequences detected by the pair.
"""
pairs_oligos = list(product(fwd_unique.index, rev_unique.index))
dview.push({"fwd_unique":fwd_unique,
"rev_unique":rev_unique,
"unite_pairs":unite_pairs})
if not len(pairs_oligos):
print "No primers were found!"
return
task = lview.map(unite_pairs, pairs_oligos, chunksize = 1000)
wait_on(task)
pairs = {pair:detected for pair, detected in task.result}
return pairs
# <codecell>
def filter_pair(fwd, rev, max_delta, max_tm_ht, dv_conc, mv_conc, primer_conc):
"""
Tests whether or not a pair o primer is compatible
"""
rev_comp = str(Seq(rev).reverse_complement())
fwd_tm = primer3.calcTm(fwd, mv_conc = mv_conc,
dv_conc = dv_conc,
dna_conc = primer_conc)
rev_tm = primer3.calcTm(rev_comp, mv_conc = mv_conc,
dv_conc = dv_conc,
dna_conc = primer_conc)
is_delta_high = np.abs(fwd_tm - rev_tm) > max_delta
ht_tm = primer3.calcHeterodimerTm(fwd, rev_comp,
mv_conc = mv_conc,
dv_conc = dv_conc,
dna_conc = primer_conc)
is_heterodimer = ht_tm > max_tm_ht
return is_delta_high or is_heterodimer
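# <markdowncell>
# A minimal sketch of the pair compatibility test above with two made-up oligos.
# The reverse oligo is given in the template orientation, as elsewhere in this
# notebook, and is reverse-complemented inside filter_pair; the concentration
# mirrors the 200 nM / 60 subprimer rescaling used above.
# <codecell>
example_fwd = "ACGGGTGAGTAACACGTGG"  # made-up forward oligo
example_rev = "GGATTAGATACCCTGGTAGTCC"  # made-up reverse oligo (template orientation)
incompatible = filter_pair(fwd = example_fwd, rev = example_rev,
max_delta = 5, max_tm_ht = 35,
dv_conc = 1.5, mv_conc = 50, primer_conc = 200/60.0)
print "Pair rejected: %s" % incompatible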
# <codecell>
def update_redundancy(redundancy, new_key, rev = False):
oligo_key = "rev" if rev else "fwd"
for key, value in redundancy[oligo_key].iteritems():
if new_key in value:
redundancy[oligo_key][new_key] = redundancy[oligo_key][key]
return redundancy
# <codecell>
def test_heterodimer(fwd, rev, included_fwd, included_rev_comp, max_tm_ht, mv_conc, dv_conc, primer_conc):
if not len(included_rev_comp) and not len(included_fwd):
return False
rev_comp = str(Seq(rev).reverse_complement())
for oligo in included_rev_comp + included_fwd:
for candidate in [rev_comp, fwd]:
ht_tm = primer3.calcHeterodimerTm(oligo,
candidate,
mv_conc = mv_conc,
dv_conc = dv_conc,
dna_conc = primer_conc)
if ht_tm >= max_tm_ht:
return True
# <codecell>
def select_pairs(fwd_dict, rev_dict, redundancy_dict, pairs, max_degen = 100, max_delta = 5,
max_tm_ht = 35, min_increase = 5, primer_conc=200, mv_conc = 50, dv_conc = 1.5):
"""
Select primers as pairs based on their coverage and compatibility.
Arguments:
-fwd_dict: a dict of forward primers as returned by the function enumerate_primers;
-rev_dict: a dict of reverse primers as returned by the function enumerate_primers;
-redundancy_dict: a dict of primers grouped by their redundancy as returned by enumerate_primers;
-pairs: a dict with pairs of primers as keys and the sequences detected by the pair as values. This
dict is created by the function enumerate_primers;
-max_degen: maximal number of subprimers to be kept;
-max_delta: maximal absolute difference in melting temperature between primers in a pair.
-max_tm_ht: maximal heterodimer melting temperature;
-min_increase: minimal number of new sequences detected for a candidate pair to be selected;
"""
best = set()
included_fwd = []
included_rev = []
included_rev_comp = []
pairs_series = pandas.Series(pairs)
cover = pairs_series.map(len).sort(inplace = False, ascending = False)
pairs_series = pairs_series[cover.index]
# Rescale concentration of primers
primer_conc_resc = primer_conc/float(max_degen)
# Increase the min_increase for the first iterations
current_min_increase = min_increase + 100
while True:
sys.stdin.readline() # Just to display the results in real time.
increase = 0
to_del = []
reject_pair = False
fwd_degen_reached = len(included_fwd) > max_degen
rev_degen_reached = len(included_rev) > max_degen
for fwd, rev in pairs_series.index:
# If the number of sequences detected by the pair is smaller than min_increase,
# delete it and go to next iteration.
if len(pairs[(fwd, rev)]) < min_increase:
to_del += [(fwd, rev)]
continue
will_degen_exceed_fwd = (fwd not in included_fwd) and fwd_degen_reached
will_degen_exceed_rev = (rev not in included_rev) and rev_degen_reached
is_not_compatible = filter_pair(fwd = fwd, rev = rev, max_delta = max_delta,
max_tm_ht = max_tm_ht, dv_conc = dv_conc,
mv_conc = mv_conc, primer_conc = primer_conc_resc)
if is_not_compatible:
# When a pair is not compatible, try to look for redundant oligos that are compatible
new_pair = reuse_redundant(fwd = fwd, rev = rev, redundancy_dict = redundancy_dict,
max_delta = max_delta, max_tm_ht = max_tm_ht,
dv_conc = dv_conc, mv_conc = mv_conc,
primer_conc = primer_conc_resc)
if new_pair:
new_fwd, new_rev = new_pair
pairs[(new_fwd, new_rev)] = pairs[(fwd, rev)]
fwd, rev = new_fwd, new_rev
is_not_compatible = False
# If a pair is incompatible and no substitute could be found or
# if the degeneracy limit was reached, delete the pair and go to the next iteration.
if will_degen_exceed_fwd or will_degen_exceed_rev or is_not_compatible:
to_del += [(fwd, rev)]
continue
# If both primers were already included in previous pairs, add the detected sequences
# to the set of detected sequences, delete the pair, and go to next iteration.
if fwd in included_fwd and rev in included_rev:
best = best.union(pairs[(fwd, rev)])
to_del += [(fwd, rev)]
continue
is_heterodimer = test_heterodimer(fwd = fwd, rev = rev,
included_fwd = included_fwd,
included_rev_comp = included_rev_comp,
max_tm_ht = max_tm_ht,
mv_conc = mv_conc, dv_conc = dv_conc,
primer_conc = primer_conc_resc)
if is_heterodimer:
to_del += [(fwd, rev)]
continue
# If a pair survived the previous conditions, test it.
union = pairs[fwd, rev].union(best)
increase = len(union) - len(best)
# If the pair increases the coverage, add it to the included oligos list
# delete the pair and stop this internal loop
if increase >= current_min_increase:
best = best.union(pairs[(fwd, rev)])
if not fwd in included_fwd:
included_fwd += [fwd]
if not rev in included_rev:
included_rev += [rev]
included_rev_comp = [str(Seq(rev_i).reverse_complement()) for rev_i in included_rev]
to_del += [(fwd, rev)]
break
elif increase < min_increase:
to_del += [(fwd, rev)]
# If no pair gave a high enough increase, reduce the current_min_increase by 20
# until it reaches the min_increase specified by the user
if increase < current_min_increase:
current_min_increase -= 20
if current_min_increase < min_increase:
break
if fwd_degen_reached and rev_degen_reached:
break
if to_del:
to_keep = [idx for idx in pairs_series.index if not idx in to_del]
pairs = {key:pairs[key] for key in to_keep}
pairs_series = pairs_series[to_keep]
clear_output()
print "Total Coverage: %d" % (len(best))
print "Forward Degeneration: %d" % (len(included_fwd))
print "Reverse Degeneration: %d" % (len(included_rev))
print "Remaing pairs: %d" % (len(pairs))
sys.stdout.flush()
return {"fwd":included_fwd,
"rev":included_rev,
"covered":best}
# <codecell>
def enumerate_primers(target_file_name,
fwd_starts,
rev_starts,
hairpin_tm_max = 30,
primer_conc = 200,
homo_tm_max = 30,
kmer_sizes = [18, 19, 20, 21, 23, 24, 25, 26, 27, 28],
tm_min = 55,
tm_max = 60,
min_occurrence = 10,
no_3_T = True,
no_poly_3_GC = True,
no_poly_run = True,
max_degen = 60,
mv_conc=50,
dv_conc=1.5,
look_ahead = 50):
"""
Enumerates forward and reverse primers from two regions of an alignment and filters them according to user-defined criteria.
Arguments:
-target_file_name: a string giving the name of the fasta file with aligned sequences to be used for enumeration of oligos;
-fwd_starts: a list of integers giving the starting positions for enumerating forward primers;
-rev_starts: a list of integers giving the starting positions for enumerating reverse primers;
-hairpin_tm_max: maximal hairpin melting temperature allowed (in Celsius degrees);
-primer_conc: total primer concentration in nM. This concentration will be rescaled automatically by max_degen.
-homo_tm_max: maximal homodimer melting temperature allowed (in Celsius degrees);
-kmer_sizes: a list of integers with the desired sizes of oligos;
-tm_min: minimal melting temperature allowed for an oligo (in Celsius degrees);
-tm_max: maximal melting temperature allowed for an oligo (in Celsius degrees);
-min_occurrence: integer. Minimal allowed occurrence of an oligo across all sequences;
-no_3_T: boolean. Should oligos with a T at the 3' end be discarded?
-no_poly_3_GC: boolean. Should oligos with three G's or C's at the 3' end be discarded?
-max_degen: the maximal number of subprimers desired. This will also be used to rescale the oligo concentration.
-no_poly_run: boolean. Should oligos with runs of four or more of the same base be discarded?
-mv_conc: monovalent ions concentration in mM.
-dv_conc: divalent ions concentration in mM.
-look_ahead: number of alignment columns read past each start position to compensate for gaps.
"""
records = read_fasta(target_file_name)
seqs_all = [str(record.seq) for record in records]
# Enumerate oligos
fwd_unique = enumerate_oligos(starts = fwd_starts,
kmer_sizes = kmer_sizes,
seqs_all = seqs_all,
look_ahead = look_ahead)
rev_unique = enumerate_oligos(starts = rev_starts,
kmer_sizes = kmer_sizes,
seqs_all = seqs_all,
look_ahead = look_ahead)
# Filter oligos
fwd_oligos = filter_oligos(fwd_unique,
rev = False,
hairpin_tm_max = hairpin_tm_max,
homo_tm_max = homo_tm_max,
tm_max = tm_max,
tm_min = tm_min,
min_occurrence = min_occurrence,
primer_conc = primer_conc,
no_3_T = no_3_T,
no_poly_3_GC = no_poly_3_GC,
no_poly_run = no_poly_run,
max_degen = max_degen,
mv_conc = mv_conc,
dv_conc = dv_conc)
rev_oligos = filter_oligos(rev_unique,
rev = True,
hairpin_tm_max = hairpin_tm_max,
homo_tm_max = homo_tm_max,
tm_max = tm_max,
tm_min = tm_min,
min_occurrence = min_occurrence,
primer_conc = primer_conc,
no_3_T = no_3_T,
no_poly_3_GC = no_poly_3_GC,
no_poly_run = no_poly_run,
max_degen = max_degen,
mv_conc = mv_conc,
dv_conc = dv_conc)
# Remove redundancy
fwd_unique = pandas.Series({oligo:fwd_unique[oligo] for oligo in fwd_oligos})
fwd_reduced = discard_redundant(fwd_unique,
max_degen = max_degen,
rev = False,
primer_conc = primer_conc,
verbose = False,
min_diff = 0,
mv_conc = mv_conc,
dv_conc = dv_conc)
fwd_unique = fwd_reduced["oligo_series"]
fwd_cover = fwd_unique.map(len).sort(inplace = False, ascending = False)
rev_unique = pandas.Series({oligo:rev_unique[oligo] for oligo in rev_oligos})
rev_reduced = discard_redundant(rev_unique,
max_degen = max_degen,
rev = True,
primer_conc = primer_conc,
verbose = False,
min_diff = 0,
mv_conc = mv_conc,
dv_conc = dv_conc)
redundancy = {"rev":rev_reduced["redundant"], "fwd":fwd_reduced["redundant"]}
# Calculate coverage
rev_unique = rev_reduced["oligo_series"]
rev_cover = rev_unique.map(len).sort(inplace = False, ascending = False)
# Make all possible combinations of primers as pairs
pairs = enumerate_pairs(fwd_unique, rev_unique)
if not pairs:
return
all_pairs = set()
for pair in pairs.values():
all_pairs = all_pairs.union(pair)
all_fwd = set()
for fwd in fwd_unique.values:
all_fwd = all_fwd.union(fwd)
all_rev = set()
for rev in rev_unique.values:
all_rev = all_rev.union(rev)
seqs_detected = all_fwd.intersection(all_rev)
# Report results
print "Coverage of all fwd: %d " % len(all_fwd)
print "Coverage of all rev: %d " % len(all_rev)
print "Joint coverage of fwd and rev: %d" % len(all_fwd.intersection(all_rev))
print "Max possible coverage: %d" % len(all_pairs)
print "Number of Foward Oligos: %d" % len(fwd_unique)
print "Number of Reverse Oligos: %d" % len(rev_unique)
return (fwd_unique, rev_unique, redundancy, pairs, seqs_detected)
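# <markdowncell>
# A hedged sketch of how the two functions above are meant to chain: enumerate_primers
# builds the candidate oligo sets and pairs, and select_pairs then picks a compatible
# subset. The alignment file name and the start positions are hypothetical; a running
# IPython parallel cluster (dview/lview) is assumed, as in the rest of this notebook,
# candidate primers must actually be found, and select_pairs reports its progress
# interactively.
# <codecell>
fwd_unique, rev_unique, redundancy, pairs, seqs_detected = enumerate_primers(
target_file_name = "./data/aligned_example.fasta",  # hypothetical file
fwd_starts = [100],  # hypothetical start positions
rev_starts = [500])
best = select_pairs(fwd_dict = dict(fwd_unique),
rev_dict = dict(rev_unique),
redundancy_dict = redundancy,
pairs = pairs)
print "Selected %d forward and %d reverse primers" % (len(best["fwd"]), len(best["rev"]))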
# <codecell>
def increase_degeneracy(included_oligos, redundancy, oligo_series, min_increase):
"""
After all pairs of primers have been exhausted, this function tries to add new primers independently
if the degeneracy is below the maximum specified by the user. This is done in the hope that these primers
will pair with other primers when they have one or more mismatches with the template sequence.
Arguments:
-included_oligos: oligos already included, as returned by the function select_pairs;
-redundancy: the dict of oligo groups as returned by enumerate_primers;
-oligo_series: a pandas series of the sequences detected by each primer as returned by enumerate_primers;
-min_increase: minimal number of new sequences detected for a new primer to be added.
"""
# Some oligos in included_oligos are not keys of the dict redundancy. When that is the case,
# look for them in the values of this dict. The purpose is to build a set of all detected sequences.
found = []
for oligo in included_oligos:
if oligo in oligo_series.index:
accs = oligo_series[oligo]
else:
for value in redundancy.values():
if oligo in value:
for oligo in value:
if oligo in oligo_series.index:
accs = oligo_series[oligo]
break
else:
pass
break
for acc in accs:
found += [acc]
found = set(found)
not_included = list(set(oligo_series.index) - set(included_oligos))
# Note: max_degen is expected to be defined in the calling scope (notebook level).
while not_included and \
len(included_oligos) < max_degen:
unions = pandas.Series()
for oligo in not_included:
unions[oligo] = len(found.union(oligo_series[oligo]))
unions = unions - len(found)
increase = unions.max()
best_oligo = unions.idxmax()
if increase > min_increase:
found = found.union(oligo_series[best_oligo])
included_oligos += [best_oligo]
not_included.remove(best_oligo)
else:
break
"Perfect matches are %d." % len(found)
return included_oligos
# <codecell>
def reuse_redundant(fwd, rev, redundancy_dict, max_delta, max_tm_ht,
dv_conc, mv_conc, primer_conc):
if fwd in redundancy_dict["fwd"]:
fwd_list = redundancy_dict["fwd"][fwd]
else:
fwd_list = [fwd]
if rev in redundancy_dict["rev"]:
rev_list = redundancy_dict["rev"][rev]
else:
rev_list = [rev]
for fwd in fwd_list:
for rev in rev_list:
is_compatible = not filter_pair(fwd = fwd, rev = rev, max_delta = max_delta,
max_tm_ht = max_tm_ht, dv_conc = dv_conc,
mv_conc = mv_conc, primer_conc = primer_conc)
if is_compatible:
return (fwd, rev)
# <codecell>
def test_heterodimers_post(fwds, revs_comp, primer_conc, mv_conc, dv_conc, max_ht_tm):
"""
Tests if there are heterodimers among the primers returned by select_pairs.
Arguments:
-fwds: a list of forward primers;
-revs_comp: a list of reverse primers already reverse-complemented;
-primer_conc: total primer concentration in nM (rescaled internally by the degeneracy);
-mv_conc: monovalent ions concentration in mM;
-dv_conc: divalent ions concentration in mM;
-max_ht_tm: maximal heterodimer melting temperature allowed.
"""
# Rescale primer concentration
degeneracy = max(len(fwds), len(revs_comp))
primer_conc_resc = primer_conc/float(degeneracy)
pairs = list(product(fwds, revs_comp))
calc_ht_tm = partial(primer3.calcHeterodimerTm,
dna_conc = primer_conc_resc,
mv_conc = mv_conc,
dv_conc = dv_conc)
hetero_tms = map(lambda x: calc_ht_tm(x[0], x[1]) > max_ht_tm, pairs)
if any(hetero_tms):
print "Heterodimers found!" # I will improve this later
print [pair for count, pair in enumerate(pairs) if hetero_tms[count]]
else:
print "No Heterodimers found!"
# <headingcell level=3>
# Test Primers
# <codecell>
def search_versions_oligos(oligo, substitution, database):
"""
Not used
"""
exp = "(%s){s<=%d}" % (oligo, substitution)
def search_oligo(seq, exp = exp):
match = regex.search(exp, seq[1])
if match:
return match.groups()
dview.push({"search_oligo":search_oligo,
"exp":exp})
task = lview.map(search_oligo, database, chunksize = 500)
wait_on(task)
found = [s for s in task.result if s]
# <codecell>
def search_oligos(fwds, revs, substitution, database, return_results = False, return_coverage = False, verbose = True):
"""
Searches oligos in a list of sequences using regular expression.
Arguments:
-fwds: a list of strings giving the forward primers to be searched.
-revs: a list of strings giving the reverse primers to be searched.
-substitution: an integer giving the maximum number of mismatches allowed.
-database: a list of strings giving the sequences to be used as template.
Fails if the template and the primers are not in the same orientation.
"""
fwd_exp = "|".join(["(%s){s<=%d}"% (primer, substitution) for primer in fwds])
rev_exp = "|".join(["(%s){s<=%d}"% (primer, substitution) for primer in revs])
fwd_exp = fwd_exp.replace("I", "[ACTG]")
rev_exp = rev_exp.replace("I", "[ACTG]")
def search_pair(seq, fwd_exp = fwd_exp, rev_exp = rev_exp):
fwd_match = regex.search(fwd_exp, seq[1])
rev_match = regex.search(rev_exp, seq[1])
if fwd_match and rev_match:
return (fwd_match.groups(), rev_match.groups())
dview.push({"search_pair":search_pair,
"fwd_exp":fwd_exp,
"rev_exp":rev_exp})
task = lview.map(search_pair, database, chunksize = 500)
wait_on(task, verbose = verbose)
found = [s for s in task.result if s]
coverage = len(found)
if return_coverage:
return coverage
elif return_results:
return task.result
else:
print "Coverage is %d out of %d sequences" % (coverage, len(database))
# <codecell>
def make_mfe_cmd(primers,
database,
output,
mfe_exec,
ppc,
min_tm,
oligo_conc,
mv_conc,
dv_conc):
"""
Make the MFEprimer command to test the primers' coverage and/or specificity.
"""
cmd = "%s " % mfe_exec +\
"-i %s " % primers +\
"--oligo_conc=%f " % oligo_conc +\
"-d %s " % database +\
"--mono_conc=%f " % mv_conc +\
"--diva_conc=%f " % dv_conc +\
"--tm_start=%f " % min_tm +\
"--ppc %d " % ppc +\
"--tab " +\
"-o %s " % output
return cmd
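# <markdowncell>
# A minimal sketch of the MFEprimer command string built above; all paths are
# hypothetical and the command is only printed, not executed. The concentration
# again mirrors the 200 nM / 60 subprimer rescaling used elsewhere.
# <codecell>
print make_mfe_cmd(primers = "./data/.temp_mfe/primers/pair_0",  # hypothetical paths
database = "./data/mfe/example_database.fasta",
output = "./data/.temp_mfe/results/result_0",
mfe_exec = "./MFEprimer/MFEprimer.py",
ppc = 30,
min_tm = 40,
oligo_conc = 200/60.0,
mv_conc = 50,
dv_conc = 1.5)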
# <codecell>
def run_mfe_parallel(database,
fwd_list,
rev_list,
output,
min_tm = 40,
ppc = 30,
mfe_exec = "./MFEprimer/MFEprimer.py",
oligo_conc = 200,
mv_conc = 50,
dv_conc = 1.5):
"""
Runs MFEprimer in parallel to test the specificity and coverage of a set of primers.
Arguments:
-database: the name of the fasta file where the unaligned sequences are.
-fwd_list: a list of strings giving the fwd primers to be tested.
-rev_list: a list of strings giving the rev primers to be tested.
-output: the name of the output file including the path.
-min_tm: minimal melting temperature for a primer to detect a target sequence.
-ppc: see MFEprimer manual or just keep it as it is.
-mfe_exec: the path for the MFEprimer python file.
-oligo_conc: primer concentration. It should be manually rescaled if degenerate primers
or multiplexing are being used.
-mv_conc: monovalent ions concentration in mM.
-dv_conc: divalent ions concentration in mM.
"""
# There was a conflict with some other product object, so I decided to import it here
from itertools import product
if os.path.isdir("./data/.temp_mfe"):
shutil.rmtree("./data/.temp_mfe")
os.makedirs("./data/.temp_mfe")
os.makedirs("./data/.temp_mfe/primers")
os.makedirs("./data/.temp_mfe/results")
def write_primer_pair(pair, count_id):
with open("./data/.temp_mfe/primers/pair_%d" % count_id, "w") as handle:
handle.write(">pair_%d_%s_fp\n%s\n>pair_%d_%s_rp\n%s\n" % \
(count_id, pair[0], pair[0], count_id, pair[1], pair[1]))
primers_pairs = product(fwd_list, rev_list)
pair_dict = {}
for count_id, pair in enumerate(primers_pairs):
pair_dict["pair_%d"%count_id] = pair # Used to index the oligo_conc dict
write_primer_pair(pair, count_id)
primers = os.listdir("./data/.temp_mfe/primers/")
cmds = []
for count, primer in enumerate(primers):
# This is for using a dictionary of oligos concentration
if type(oligo_conc) == dict:
pair = pair_dict[primer]
if pair[0] in oligo_conc:
curr_oligo_conc = oligo_conc[pair[0]]
elif pair[1] in oligo_conc:
curr_oligo_conc = oligo_conc[pair[1]]
else:
raise Exception("Oligo concentration invalid! Are the oligos in the correct strand?")
else:
try:
curr_oligo_conc = float(oligo_conc)
except TypeError:
raise Exception("Oligo_conc must be either a number or a dictionary")
cmd = make_mfe_cmd(primers = "./data/.temp_mfe/primers/%s" % primer,
database = database,
output = "./data/.temp_mfe/results/result_%d" % count,
min_tm = min_tm,
ppc = ppc,
mfe_exec = mfe_exec,
oligo_conc = curr_oligo_conc,
mv_conc = mv_conc,
dv_conc = dv_conc)
cmds += [cmd]
run_cmd = lambda cmd: os.system(cmd)
dview.push({"run_cmd":run_cmd})
task = lview.map(run_cmd, cmds)
wait_on(task)
cmd = ("cd ./data/.temp_mfe/results/;"
"cat $(ls) > all_results;"
"awk 'NR==1{print $0} !/AmpID/ {print $0}' all_results > primers_out.txt;"
"cd ../../../;"
"cp ./data/.temp_mfe/results/primers_out.txt %s")%output
os.system(cmd)
#shutil.rmtree("./data/.temp_mfe")
return task
# <codecell>
#***************************** In development
# <codecell>
def run_tntblast(nproc,
fwd_list,
rev_list,
output_name,
database_name,
min_tm,
max_tm,
primer_conc,
mv_conc,
dntp_conc = 0.8,
dv_conc = 1.5,
plex = False,
clamp = None,
rescale_conc = False,
target_strand = "plus",
lighter_output = True):
"""
Tests the primer set using thermonucleotide blast (tntblast).
Arguments:
-nproc: an integer giving the number of processors to be used by mpi
-fwd_list: a list of strings giving the forward primers
-rev_list: a list of strings giving the reverse primers
-output_name: a string giving the name of the output file
-database_name: a string giving the name of the fasta file to be used as input
-min_tm: an integer with the minimal melting temperature
-max_tm: an integer with the maximal melting temperature
-primer_conc: the primer concentration in nM (a float).
"""
# To avoid conflict with pylab
from itertools import product
pairs = product(fwd_list, rev_list)
with open("./data/.tnt_primers", "w") as handle:
for count, pair in enumerate(pairs):
handle.write("pair_%d\t%s\t%s\n" % (count, pair[0], pair[1]))
try:
mv_corrected = mv_conc + 120 * (dv_conc - dntp_conc)**.5
except ValueError:
mv_corrected = mv_conc
cmd = "mpirun -np %d "%nproc +\
"tntblast -i %s "% './data/.tnt_primers' +\
"-s %fe-3 "%mv_corrected +\
"-o %s "%output_name +\
"-d %s "%database_name +\
"-e %d "%min_tm +\
"-x %d "%max_tm +\
"-t %fe-9 "%primer_conc +\
"--target-strand=%s "%target_strand
if plex:
cmd += " --plex=F "
if clamp:
cmd += "--primer-clamp=%d "%clamp
if not rescale_conc:
cmd += "--rescale-ct=F "
if lighter_output:
cmd += " -a F -M F "
print cmd
process = os.system(cmd)
return process
# <codecell>
#********************* End of development section
# <codecell>
def process_data(fwd_data_name, rev_data_name, delta_tm = 5, return_raw = False):
"""
Combines the results for fwd and rev primers into a single dataframe and keeps only the best
match for each detected sequence.
Arguments:
-fwd_data_name: the name of the data file with the results of MFEprimer for forward primers.
-rev_data_name: the name of the data file with the results of MFEprimer for reverse primers.
-delta_tm: maximal allowed absolute difference between fwd and rev melting temperatures in Celsius degrees.
"""
data_fwd = pandas.read_csv(fwd_data_name, sep = "\t")
data_rev = pandas.read_csv(rev_data_name, sep = "\t")
data_fwd = data_fwd[["FpID", "HitID", "FpTm", "BindingStart"]]
data_rev = data_rev[["RpID", "HitID", "RpTm", "BindingStop"]]
data = pandas.merge(data_rev, data_fwd, on="HitID", how="outer")
data = data.dropna()
data["DeltaTm"] = np.abs(data.FpTm - data.RpTm)
data["AmpLen"] = data.BindingStop - data.BindingStart
data = data.ix[data.DeltaTm <= delta_tm, :]
data["fwd_primer"] = data.FpID.map(lambda x: x.split("_")[2])
data["rev_primer"] = data.RpID.map(lambda x: x.split("_")[2])
data["Lowest_Tm"] = data.apply(lambda row: min(row["FpTm"], row["RpTm"]), axis = 1)
if return_raw:
return data
grouped = data.groupby(["HitID"], as_index = False)
data = grouped.apply(lambda group: group.ix[group.Lowest_Tm.idxmax()])
fwds_tm = data.groupby("fwd_primer").FpTm.max()
revs_tm = data.groupby("rev_primer").RpTm.max()
def calculate_max_diff(row, fwds_tm, revs_tm):
fwd = row["fwd_primer"]
rev = row["rev_primer"]
fwd_tm_max = fwds_tm[fwd]
rev_tm_max = revs_tm[rev]
fwd_diff = np.abs(row["FpTm"] - fwd_tm_max)
rev_diff = np.abs(row["RpTm"] - rev_tm_max)
return max(fwd_diff, rev_diff)
data["Diff"] = data.apply(lambda x:calculate_max_diff(x, fwds_tm, revs_tm), axis = 1)
return data
# <headingcell level=3>
# Add inosine to primers
# <codecell>
def calc_tm_general(oligos,
complements,
oligo_conc,
mv_conc,
dv_conc,
nn_method = "all97",
salt_method = "san04",
in_file = "./data/oligos_for_melting",
verbose = False):
if not complements:
complements = [str(Seq(oligo).complement()) for oligo in oligos]
complements = [complement.replace("I", "A") for complement in complements]
assert type(oligo_conc) in [float, int], "Oligo concentration must be numeric."
assert type(mv_conc) in [float, int], "Ion concentration must be numeric."
assert type(dv_conc) in [float, int], "Ion concentration must be numeric."
with open(in_file, "w") as handle:
for oligo, complement in zip(oligos, complements):
handle.write("A%sA T%sT\n" % (oligo, complement))
melting_path = os.path.abspath("./MELTING/executable/melting-batch")
file_path = os.path.abspath(in_file)
cmd = Template(
"$melting_path "
"-H dnadna "
"-nn $nn_method "
"-ion $salt_method "
"-P ${oligo_conc}e-9 "
"-E Na=${mv_conc}e-3:Mg=${dv_conc}e-3 "
"$file_path")
cmd = cmd.substitute(salt_method = salt_method,
nn_method = nn_method,
mv_conc = mv_conc,
dv_conc = dv_conc,
oligo_conc = oligo_conc,
melting_path = melting_path,
file_path = file_path)
if verbose:
print cmd
os.system(cmd)
out_file = in_file + ".results.csv"
# To remove a ^M character that is preventing the file from being read
cmd = "awk '!/Delta/{print $1, $2, $3, $4, $5}' %s > %s" % \
(out_file, out_file + ".modified")
os.system(cmd)
results = pandas.read_table(out_file + ".modified", sep = "[\s\t]", header = None)
results.columns = ["Oligo", "Match", "DeltaH",
"DeltaS", "Tm"]
#os.remove(in_file)
#os.remove(out_file)
#os.remove(out_file + ".modified")
#results["Oligo"] = results["Oligo"].map(lambda x: x[1:-1])
#results["Match"] = results["Match"].map(lambda x: x[1:-1])
# To make it comparable with the calculations using primer3
results["Tm"] = results["Tm"] - 1
return results[["Oligo", "Match", "Tm"]]
# <codecell>
def find_match_mfe(data, HitID, oligo, binding, rev = False):
if rev:
start = binding - len(oligo)
stop = binding
else:
start = binding -1
stop = binding + len(oligo) - 1
match = data[HitID][int(start):int(stop)]
if rev:
match = str(match[::-1])
else:
match = str(Seq(match).complement())
return match
# <codecell>
def find_all_matches_from_mfe(data, mfe_database, fwd_dummy, min_tm):
records = make_dict_records(mfe_database)
records = {key:str(value.seq) for key, value in records.iteritems()}
msg = ("Database does not contain dummy oligos. Please provide the same database "
"used in the run_mfe_parallel function.")
assert fwd_dummy in records.values()[0], msg
data_valid = data.ix[(data.FpTm > min_tm) & (data.RpTm > min_tm), :]
for oligo, binding, match in [("fwd_primer", "BindingStart", "MatchFwd"),
("rev_primer", "BindingStop", "MatchRev")]:
unique = set(zip(data_valid["HitID"], data_valid[oligo], data_valid[binding]))
unique = list(unique)
rev = oligo == "rev_primer"
matches = map(lambda x: find_match_mfe(data = records,
HitID = x[0],
oligo = x[1],
binding = x[2],
rev = rev),
unique)
data_unique = pandas.DataFrame(unique, columns = ["HitID", oligo, binding])
data_unique[match] = matches
data_valid = pandas.merge(data_valid, data_unique, on = ["HitID", oligo, binding])
return data_valid
# <codecell>
def correct_tm(data, total_oligo_conc, mv_conc = 50, dv_conc = 1.5, min_tm = 40, verbose = False):
data_valid = data.ix[(data.FpTm > min_tm) & (data.RpTm > min_tm), :]
for column_primer, column_match in [("fwd_primer", "MatchFwd"),
("rev_primer", "MatchRev")]:
unique_comb = set(zip(data_valid[column_primer], data_valid[column_match]))
oligos = [unique[0] for unique in unique_comb]
complements = [unique[1] for unique in unique_comb]
oligo_conc_rescaled = total_oligo_conc / float(len(set(oligos)))
tm = calc_tm_general(oligos = oligos,
complements = complements,
oligo_conc = oligo_conc_rescaled,
mv_conc = mv_conc,
dv_conc = dv_conc,
verbose = verbose)
rename_dict = {"Oligo":column_primer,
"Match":column_match,
"Tm":("tm_" + column_primer[:3])}
tm = tm.rename(columns = rename_dict)
data_valid = pandas.merge(data_valid, tm, on = [column_primer, column_match])
return data_valid
# <codecell>
def find_low_agreement_pos(seqs, min_agreement):
"""
Find positions of most frequent mismatches in a given primer.
It is used in the function add_inosine.
"""
pos = zip(*seqs)
comp = {}
for count, p in enumerate(pos):
comp_i = pandas.Series(p).value_counts() / len(p)
max_agreement = comp_i.max()
comp[count] = max_agreement
comp = pandas.Series(comp)
comp = comp[comp < min_agreement]
return comp
# <codecell>
def filter_low_agreement_positions(pos, oligo, allowed_bases,
min_distance, max_inos_add):
"""
Discard regions of high variability in the primer if they are not suitable for inosine addition.
It is used in the function add_inosine.
"""
pos.sort()
# Discard positions whose base is not allowed to be substituted by inosine
good_pos = [p for p in pos.index if oligo[p] in allowed_bases]
included_pos = []
for count, p in enumerate(good_pos):
if count == 0:
included_pos += [p]
continue
distances = [np.abs(p - p_inc) for p_inc in included_pos]
close = [d for d in distances if d < min_distance]
if close:
continue
else:
included_pos += [p]
if len(included_pos) >= max_inos_add:
break
return included_pos
# <codecell>
def add_inosine(oligos, records, min_agreement = .85, allowed_bases = "ATG",
min_distance = 5, max_inos_add = 3):
"""
Adds inosine to the primers.
Arguments:
-oligos: a list of primers where the inosine should be added.
-records: a list of records as returned by the function read_fasta.
-min_agreement: float. Highest proportion allowed for the most abundant base at a given position
for that position to become a candidate for inosine addition.
-allowed_bases: string giving the bases that can be substituted by inosine in the original primer.
-min_distance: minimal spacing in number of bases that should be between inosines.
-max_inos_add: maximal number of inosines per primer.
Currently there is no way to test for heterodimers, homodimers or hairpins in primers with inosine using
primer3, so the user should check whether the inosine addition causes this kind of problem using tools
such as a Bioanalyzer.
"""
oligos_degen = []
for oligo in oligos:
exp = "(%s){s<=3}"%oligo
seqs = [regex.findall(exp, str(record.seq)) for record in records]
seqs = [s[0] for s in seqs if s]
low_agree = find_low_agreement_pos(seqs, min_agreement)
included_pos = filter_low_agreement_positions(pos = low_agree,
oligo = oligo,
allowed_bases = allowed_bases,
min_distance = min_distance,
max_inos_add = max_inos_add)
oligo = list(oligo)
if included_pos:
for p in included_pos:
oligo[p] = "I"
oligos_degen += ["".join(oligo)]
return oligos_degen
# <codecell>
def remove_redundant_after_inosine_addition(data, min_increase):
"""
Removes oligos that don't increase coverage after inosine addition.
Arguments:
-data: a pandas dataframe with the sequences detected by each oligo, as returned by measure_coverage_oligo
-min_increase: minimal increase in coverage for an oligo to be kept.
"""
cover = data.sum().sort(inplace = False, ascending = False)
to_del = []
for primer in cover.index:
if primer == cover.index[0]:
previously_detected = data[primer]
continue
union = data[primer] | previously_detected
increase = union.sum() - previously_detected.sum()
if increase < min_increase:
to_del += [primer]
return to_del
# <codecell>
def measure_coverage_oligo(oligos, database, substitution, rev = False):
"""
Measure the coverage of oligos. Used to discard redundancy.
Arguments:
-oligos: a list of oligos to search.
-database: the template sequences to search.
-substitution: maximal number of substitutions (I highly recommend keeping it at 0).
-rev: is the oligo a reverse primer?
"""
covers = {oligo:None for oligo in oligos}
for oligo in oligos:
if rev:
fwds = ["I"]
revs = [oligo]
else:
fwds = [oligo]
revs = ["I"]
covers[oligo] = search_oligos(fwds = fwds,
revs = revs,
substitution = substitution,
database = database,
return_results = True)
covers = pandas.DataFrame(covers)
covers = covers.notnull().astype(int)
return covers
# <codecell>
def distribute_conc_by_cover(data, total_conc):
"""
Divides the total concentration of the primers proportionally to the number of sequences detected by each primer.
Arguments:
-data: a pandas dataframe with sequences detected by the oligo as returned by remove_redundant_after_inosine_addition
-total_conc: total primer concentration in nM
"""
detected = data.sum(axis = 1) > 0
data = data.ix[detected, :]
effec_conc = total_conc / float(len(data.index))
ind_conc = (float(1) / data.sum(axis = 1)) * effec_conc
conc = data.apply(lambda x: x*ind_conc, axis = 0).sum()
return dict(conc)
# <codecell>
def make_complement_conc_rev(conc_rev):
new_dict = {}
for primer, conc in conc_rev.iteritems():
comp = str(Seq(primer).reverse_complement())
new_dict[comp] = conc
return new_dict
# <codecell>
def prune_inosine(fwd_degen_list,
fwd_degen_dict,
rev_degen_list,
rev_degen_dict,
orig_coverage,
min_increase,
database,
rev = False):
"""
Removes inosines that don't increase coverage in relation to sequences already detected by other primers.
Arguments:
-fwd_degen_list: a list of forward degenerate primers
-rev_degen_list: a list of reverse degenerate primers
-fwd_degen_dict: a dict mapping degenerate forward primers to their original (non-degenerate) versions
-rev_degen_dict: a dict mapping degenerate reverse primers to their original (non-degenerate) versions
-orig_coverage: the initial coverage.
-min_increase: minimal drop in coverage (when an inosine is removed) required to keep that inosine.
-database: a list of sequences to be searched against the oligos.
-rev: is the oligo a reverse primer?
"""
if rev:
oligo_list = rev_degen_list
oligo_dict = rev_degen_dict
else:
oligo_list = fwd_degen_list
oligo_dict = fwd_degen_dict
for pos, oligo in enumerate(oligo_list):
inosines = [p for p, b in enumerate(oligo) if b == "I"]
orig_oligo = oligo_dict[oligo]
for inos_pos in inosines:
oligo_wo_1_inos = list(oligo)
oligo_wo_1_inos[inos_pos] = orig_oligo[inos_pos]
oligo_wo_1_inos = "".join(oligo_wo_1_inos)
oligo_list[pos] = oligo_wo_1_inos
if rev:
rev_list_for_test = oligo_list
fwd_list_for_test = fwd_degen_list
else:
rev_list_for_test = rev_degen_list
fwd_list_for_test = oligo_list
mod_coverage = search_oligos(fwds = fwd_list_for_test,
revs = rev_list_for_test,
substitution = 0,
database = database,
return_coverage = True,
verbose = False)
if orig_coverage - mod_coverage >= min_increase:
oligo_list[pos] = oligo # Return to previous value
else:
oligo = oligo_list[pos] # Update the reference for next iterations
orig_coverage = mod_coverage
clear_output()
msg_oligo = "reverse" if rev else "forward"
print "Prunning %s primer %d of %d." % (msg_oligo, pos+1, len(oligo_list))
sys.stdout.flush()
return oligo_list
# <codecell>
def prune_inosine_fwd_rev(rev_degen_list,
fwd_degen_list,
fwd_degen_dict,
rev_degen_dict,
database,
min_increase = 20):
"""
Applies the function prune_inosine to remove inosines that don't increase coverage in relation to
sequences already detected by other primers.
Arguments:
-fwd_degen_list: a list of forward degenerate primers
-rev_degen_list: a list of reverse degenerate primers
-fwd_degen_dict: a dict mapping degenerate forward primers to their original (non-degenerate) versions
-rev_degen_dict: a dict mapping degenerate reverse primers to their original (non-degenerate) versions
-database: a list of sequences to be searched against the oligos.
-min_increase: minimal drop in coverage (when an inosine is removed) required to keep that inosine.
"""
orig_coverage = search_oligos(fwds = fwd_degen_list,
revs = rev_degen_list,
substitution = 0, database = database,
return_coverage = True,
verbose = False)
fwd_degen_list = prune_inosine(fwd_degen_list = fwd_degen_list,
fwd_degen_dict = fwd_degen_dict,
rev_degen_list = rev_degen_list,
rev_degen_dict = rev_degen_dict,
orig_coverage = orig_coverage,
min_increase = min_increase,
database = database,
rev = False)
orig_coverage = search_oligos(fwds = fwd_degen_list,
revs = rev_degen_list,
substitution = 0, database = database,
return_coverage = True,
verbose = False)
rev_degen_list = prune_inosine(fwd_degen_list = fwd_degen_list,
fwd_degen_dict = fwd_degen_dict,
rev_degen_list = rev_degen_list,
rev_degen_dict = rev_degen_dict,
orig_coverage = orig_coverage,
min_increase = min_increase,
database = database,
rev = True)
return {"fwd_degen":fwd_degen_list,
"rev_degen":rev_degen_list}
# <codecell>
def classify_seqs(HitID, records_class):
try:
seq_class = records_class[HitID]
except KeyError:
seq_class = "unknown"
return seq_class
# <headingcell level=3>
# Discard redundant oligos
# <codecell>
def remove_redundant_one(data_raw_filtered, min_increase, rev = False):
"""
Removes sequences that are redundant based on thermodynamic simulations.
Arguments:
-data_raw_filtered: raw data from a MFEprimer run.
-min_increase: minimal increase in coverage to keep a primer.
-rev: is the oligo a reverse primer?
"""
primer_pos = "rev_primer" if rev else "fwd_primer"
grouped = data_raw_filtered.groupby([primer_pos])
covered = grouped.apply(lambda x: set(x["HitID"].unique()))
coverage = covered.map(len).sort(inplace = False)
for primer in coverage.index:
original_coverage = len(data_raw_filtered.HitID.unique())
data_wo_one = data_raw_filtered.ix[data_raw_filtered[primer_pos] != primer, :]
mod_coverage = len(data_wo_one.HitID.unique())
if (original_coverage - mod_coverage) <= min_increase:
data_raw_filtered = data_wo_one
return data_raw_filtered
# <codecell>
def remove_redundant_all(data_raw, min_increase):
"""
Removes sequences that are redundant based on thermodynamic simulations.
Arguments:
-data_raw: raw data from MFEprimer (see example)
-min_increase: minimal increase in coverage to keep a primer.
"""
grouped = data_raw.groupby(["fwd_primer", "rev_primer"])
grouped_fwd = data_raw.groupby(["fwd_primer"])
grouped_rev = data_raw.groupby(["rev_primer"])
mean_fwd = grouped_fwd.FpTm.max().mean()
mean_rev = grouped_rev.RpTm.max().mean()
c1 = data_raw.FpTm.map(lambda x: numpy.abs(x - mean_fwd) <= 4) # Is the fwd Tm within 4C of the mean max Tm?
c2 = data_raw.RpTm.map(lambda x: numpy.abs(x - mean_rev) <= 4)
c3 = data_raw.DeltaTm <= 5 # Is delta Tm <= 5?
data_raw_filtered = data_raw.ix[c1 & c2 & c3, :]
data_filtered = remove_redundant_one(data_raw_filtered, min_increase, rev = False)
data_filtered = remove_redundant_one(data_filtered, min_increase, rev = True)
return data_filtered
# <codecell>
def substitute_inosine(oligo):
iupac = {
"A":"A",
"C":"C",
"T":"T",
"G":"G",
"R":"AG",
"Y":"CT",
"S":"GC",
"W":"AT",
"K":"GT",
"M":"AC",
"B":"CGT",
"D":"AGT",
"H":"ACT",
"V":"ACG",
"N":"ACTG",
"I":"ACTG",
}
oligo = list(oligo)
sub_primers = [iupac[base] for base in oligo]
sub_primers = list(product(*sub_primers))
sub_primers = ["".join(sub_primer) for sub_primer in sub_primers]
return sub_primers
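# <markdowncell>
# A self-contained sketch of the IUPAC/inosine expansion above: a single made-up
# primer containing one inosine ("I") is expanded into its four concrete variants.
# As in the rest of this notebook, itertools.product is assumed to be the `product`
# in scope.
# <codecell>
print substitute_inosine("ACGITG")  # made-up oligo with one inosine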
# <codecell>
def make_degenerate(oligos):
iupac = {
'A': 'A',
'AC': 'M',
'ACG': 'V',
'ACT': 'H',
'ACTG': 'N',
'AG': 'R',
'AGT': 'D',
'AT': 'W',
'C': 'C',
'CGT': 'B',
'CT': 'Y',
'G': 'G',
'GC': 'S',
'GT': 'K',
'T': 'T'
}
for key in iupac.keys():
permuted_keys = permutations(key)
for perm_key in permuted_keys:
iupac["".join(perm_key)] = iupac[key]
bases = [set(column) for column in zip(*oligos)]
bases = ["".join(pos) for pos in bases]
print "|".join(bases)
bases = [iupac[pos] for pos in bases]
return "".join(bases)
# <codecell>
def verify_dimers_degen_primers(fwd_primer_list,
rev_primer_list,
oligo_conc,
mv_conc = 50,
dv_conc = 1.5,
max_dimer_tm = 35):
# Use highest concentration to calculate Tm (conservative)
oligos = fwd_primer_list + rev_primer_list
expanded_oligos = []
for oligo in oligos:
expanded_oligos += substitute_inosine(oligo)
comb_oligos = combinations(expanded_oligos, 2)
found = False
for pair in comb_oligos:
tm = primer3.calcHeterodimerTm(pair[0],
pair[1],
dna_conc = oligo_conc,
dv_conc = dv_conc,
mv_conc = mv_conc)
if tm > max_dimer_tm:
print "Dimer found!"
found = True
print pair
print "Melting temperature: %f" % tm
if not found:
print "No dimer found."
# <codecell>
def verify_hairpins(oligo_list,
oligo_conc,
max_hairpin_tm = 35,
dv_conc = 1.5,
mv_conc = 1.5):
found = False
for oligo in oligo_list:
tm = primer3.calcHairpinTm(oligo,
dna_conc = oligo_conc,
dv_conc = dv_conc,
mv_conc = mv_conc)
if tm > max_hairpin_tm:
print "Hairpin found!"
found = True
print oligo
print "Melting temperature: %f" % tm
if not found:
print "No hairpin found."
# <headingcell level=3>
# Tools for alignment visualization
# <codecell>
def deduplicate_for_visualization(aligned_fasta_file):
print "Removing duplicated sequences."
cluster_sequences(input_file = "./data/ordered/unaligned.fasta",
output_file = "./data/ordered/unaligned_not_amb.fasta",
similarity = 1,
word_size = 10)
not_amb = make_dict_records("./data/ordered/unaligned_not_amb.fasta")
aligned_amb = make_dict_records(aligned_fasta_file)
aligned_not_amb = [aligned_amb[seq_id] for seq_id in not_amb.keys()]
write_fasta("./data/ordered/aligned_not_amb.fasta", aligned_not_amb)
# <codecell>
def find_start_end(seq1, seq2):
start_1 = regex.search(r"^[-\.]*", seq1)
start_2 = regex.search(r"^[-\.]*", seq2)
end_1 = regex.search(r"[-\.]*$", seq1)
end_2 = regex.search(r"[-\.]*$", seq2)
start = max(start_1.end(), start_2.end())
end = min(end_1.start(), end_2.start())
return (start, end)
# <codecell>
def calculate_distance(seq1, seq2):
start, end = find_start_end(seq1, seq2)
distance = sum(a != b for a,b in zip(seq1[start:end], seq2[start:end])) / float(end - start)
return distance
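# <codecell>
# Editor's note -- a minimal check, not part of the original analysis: the
# distance is the fraction of mismatches over the region covered by both
# sequences; leading/trailing gaps are excluded (pairwise deletion).
print calculate_distance("--ACGT--", "--ACGA--")
# 1 mismatch / 4 compared positions -> 0.25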
# <codecell>
def write_ordered_fasta(ref_id,
out_fasta, similarity_threshold,
original_aligned_file,
fasta_file = "./data/ordered/aligned_not_amb.fasta"):
records = make_dict_records(fasta_file)
# The ref_id could be removed after deduplication.
if ref_id in records.keys():
ref_seq = str(records[ref_id].seq)
else:
original_records = make_dict_records(original_aligned_file)
try:
ref_seq = str(original_records[ref_id].seq)
except KeyError:
raise Exception("%s is not in the provided fasta file!" % ref_id)
targets = [str(record.seq) for record in records.values()]
dists = map(lambda target: calculate_distance(ref_seq, target), targets)
dists = {seq_id:dist for seq_id, dist in zip(records.keys(), dists)}
dists = pandas.Series(dists)
dists.sort(inplace = True)
dists = dists[dists <= (1 - similarity_threshold)]
records_sorted = []
for count, seq_id in enumerate(dists.index):
records_sorted += [records[seq_id]]
records_sorted[count].id += "_%.2f%%"% ((1 - dists[seq_id]) * 100)
write_fasta(out_fasta, records_sorted)
return records_sorted
# <codecell>
def calculate_composition(records_ordered, reference_seq_id):
print "Calculating composition."
seqs = [str(record.seq) for record in records_ordered]
pos = zip(*seqs)
compositions = []
for column in pos:
composition = {"A":0, "C":0, "T":0, "G":0, "Others":0}
for base in column:
try:
composition[base.upper()] += 1
except KeyError:
composition["Others"] += 1
compositions += [composition]
data = pandas.DataFrame(compositions) / len(seqs)
data["Seq_ID"] = reference_seq_id
data = data.reset_index()
if not os.path.isdir("./data/ordered/compositions"):
os.makedirs("./data/ordered/compositions")
data.to_csv("./data/ordered/compositions/%s.csv" % reference_seq_id)
# <codecell>
def concatenate_compositions():
files = os.listdir("./data/ordered/compositions/")
cur_file = files.pop()
data = pandas.read_csv("./data/ordered/compositions/%s" % cur_file)
while files:
cur_file = files.pop()
cur_data = pandas.read_csv("./data/ordered/compositions/%s" % cur_file)
data = pandas.concat([data, cur_data])
data.to_csv("./data/ordered/compositions.csv", index = False)
# <codecell>
def main_visualization(reference_seq_ids,
aligned_fasta_file,
out_fasta,
similarity_threshold,
composition_name,
classes = None,
deduplicate = True):
"""
This function reorders the provided aligned sequences in a fasta file according to their
similarity to a reference sequence in the file. It also plots the base composition for the
resulting ordered fasta file.
Arguments:
-reference_seq_ids: must be a list with one or more references. Note the reference is the
first "word" that follows tha > sign in a fasta file. For example, a
record where the sequence description is as follow:
>Bradyrhizobium_3 japonicum
The reference for this sequence would be "Bradyrhizobium_3".
Also note that this argument must be a list, meaning that the reference(s)
must be enclosed by square brackets [] as in the example.
-aligned_fasta_file: a string giving the name (including the path) of the aligned fasta file
to be ordered. This function takes only one fasta file at a time.
-out_fasta: the prefix of the name of the output file. The reference sequence ID will be added
to this prefix to make the name of the file. Make sure you include the path.
-similarity_threshold: the minimum similarity to the reference. Sequences will be discarded
if they are less similar than the threshold. The distance used is the
Hamming distance proportional to sequence size. Pairwise deletion is
used to deal with missing data.
-composition_name: the name of the pdf file with the composition plot, including the path.
-classes: an optional argument in case you want to give a meaningful title for each composition
plot instead of the reference_id. Note that it must be a list and the order of the
elements corresponds to the order of the elements in reference_seq_ids.
-deduplicate: a boolean (True or False) argument indicating whether or not the fasta file
should be deduplicated.
"""
if not classes:
classes = reference_seq_ids
if type(reference_seq_ids) != list:
raise Exception("reference_seq_ids must be given as a list!")
if os.path.isdir("./data/ordered"):
shutil.rmtree("./data/ordered")
records_test = make_dict_records(aligned_fasta_file)
for seq_id in reference_seq_ids:
assert seq_id in records_test.keys(), "%s not in the provided fasta file!" % seq_id
os.makedirs("./data/ordered")
dealign_seq(in_file_name = aligned_fasta_file,
out_file_name = "./data/ordered/unaligned.fasta")
# Create the "./data/ordered/aligned_not_amb.fasta"
if deduplicate:
deduplicate_for_visualization(aligned_fasta_file = aligned_fasta_file)
else:
shutil.copyfile(aligned_fasta_file,
"./data/ordered/aligned_not_amb.fasta")
out_aligned_files = []
for pos, reference_seq_id in enumerate(reference_seq_ids):
records_ordered = write_ordered_fasta(ref_id = reference_seq_id,
out_fasta = out_fasta + reference_seq_id,
original_aligned_file = aligned_fasta_file,
similarity_threshold = similarity_threshold)
calculate_composition(records_ordered, classes[pos])
out_aligned_files += [out_fasta + reference_seq_id]
concatenate_compositions()
if not ".pdf" in composition_name:
composition_name = composition_name + ".pdf"
make_r_script(composition_name)
process = os.system("Rscript ./data/ordered/plot_composition.R")
if process:
print "It was not possible to plot the composition."
else:
shutil.rmtree("./data/ordered")
print "Done!"
print "The pdf file with compositions was saved as: %s." % composition_name
print "The ordered fasta file(s) were/was saved as follows:"
for file_out in out_aligned_files:
print file_out
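# <codecell>
# Editor's note -- a usage sketch with hypothetical file names and reference
# IDs (commented out because it needs real input files):
# main_visualization(reference_seq_ids = ["Bradyrhizobium_3"],
# aligned_fasta_file = "./data/aligned.fasta",
# out_fasta = "./data/ordered_",
# similarity_threshold = 0.8,
# composition_name = "./data/composition.pdf")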
# <codecell>
def make_r_script(composition_name):
script = ("""
library(plyr)
library(reshape2)
library(ggplot2)
orig_data <- read.csv("./data/ordered/compositions.csv", stringsAsFactors = FALSE)
data <- melt(orig_data[, -1],
id.vars = c("index", "Seq_ID"),
value.name = "Freq",
variable.name = "Base")
data$Freq <- as.numeric(data$Freq)
width = length(unique(data$index)) / 8
max_index = max(data$index)
data = ddply(data, c("index", "Seq_ID"), transform, Order = order(Freq, decreasing = TRUE))
data = ddply(data, c("index", "Seq_ID"), function(x) x[x$Order, ])
data = ddply(data, c("index", "Seq_ID"), transform, FreqSum = cumsum(Freq))
data$Base <- as.character(data$Base)
data$Base[data$Base == "Others"] <- "-"
data$Seq_ID <- as.factor(data$Seq_ID)
add_space = function(x, n) paste(rep(paste(rep(" ", 100), collapse = ""), n), x, collapse = "")
n = max(data$index) / 30
for(level in levels(data$Seq_ID)){
levels(data$Seq_ID)[levels(data$Seq_ID) == level] <- add_space(level, n)
}
theme_set(theme_minimal(9))
graph = ggplot(data) +
aes(x = index, fill = reorder(Base, Freq), y = Freq) +
facet_wrap(~ Seq_ID, ncol = 1) +
geom_bar(stat = "identity",
colour = "black",
size = .2,
show_guide = FALSE,
width = 1) +
scale_fill_manual(values = c("C" = "red",
"A" = "blue",
"T" = "green" ,
"-" = "grey",
"G" = "orange")) +
geom_text(aes(y = FreqSum - Freq / 2,
label = Base,
size = Freq),
show_guide = FALSE) +
scale_size_continuous(range = c(0.5, 4)) +
labs(x = "Position",
y = "Frequency",
fill = "Bases") +
scale_x_continuous(expand = c(0, 0),
breaks = seq(0, max_index),
labels = seq(1, max_index + 1)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
legend.position = "left",
strip.text.x = element_text(size=8))
ggsave("%s", width = width, height = 1.5 * length(unique(data$Seq_ID)), limitsize = FALSE)
""") % composition_name
with open("./data/ordered/plot_composition.R", "w") as handle:
handle.write(script)
# <codecell>
def create_test_train_datasets(original_data_algn, original_data_not_algn, prop_test = .3):
records_algn = make_dict_records(original_data_algn)
records_not_algn = make_dict_records(original_data_not_algn)
n_seq = len(records_algn)
test_size = int(prop_test * n_seq)
sample_test = random.sample(xrange(n_seq), test_size)
sample_train = set(xrange(n_seq)) - set(sample_test)
acc_test = [records_algn.keys()[i] for i in sample_test]
acc_train = [records_algn.keys()[i] for i in sample_train]
test_records_algn = [records_algn[i] for i in acc_test]
test_records_not_algn = [records_not_algn[i] for i in acc_test]
train_records_algn = [records_algn[i] for i in acc_train]
train_records_not_algn = [records_not_algn[i] for i in acc_train]
write_fasta("./data/aligned_train", train_records_algn)
write_fasta("./data/unaligned_train", train_records_not_algn)
write_fasta("./data/aligned_test", test_records_algn)
write_fasta("./data/unaligned_test", test_records_not_algn)
|
teodecarvalho/PrimerDesign
|
functions_definition.py
|
Python
|
mit
| 110,886
|
[
"BLAST",
"Biopython"
] |
284db1ec8f630f46d2591d49f412376ebf942ec1af8be11f4136ee3864d356ab
|
"""
Retrieve logging information for a DIRAC job
"""
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.DISET.RPCClient import RPCClient
import os, shutil, datetime
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces.Utilities.DCommands import ArrayFormatter
class Params:
def __init__ ( self, session ):
self.__session = session
self.fmt = "pretty"
def setFmt( self, arg = None ):
self.fmt = arg.lower()
def getFmt( self ):
return self.fmt
session = DSession()
params = Params( session )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.registerSwitch( "", "Fmt=", "display format (pretty, csv, json)", params.setFmt )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
exitCode = 0
jobs = map( int, args )
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
af = ArrayFormatter( params.getFmt() )
headers = ["Status", "MinorStatus", "ApplicationStatus", "Time", "Source"]
errors = []
for job in jobs:
result = monitoring.getJobLoggingInfo( job )
if result['OK']:
print af.listFormat( result['Value'], headers, sort = headers.index( "Time" ) )
else:
errors.append( result["Message"] )
exitCode = 2
for error in errors:
print "ERROR: %s" % error
DIRAC.exit( exitCode )
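# Editor's note -- hypothetical invocations of this script:
# $ dlogging 123 456 # pretty-print the logging records of two jobs
# $ dlogging --Fmt=json 123 # the same records rendered as JSON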
|
pigay/COMDIRAC
|
Interfaces/scripts/dlogging.py
|
Python
|
gpl-3.0
| 1,579
|
[
"DIRAC"
] |
b5b089e9888e3ddabbe5b98db0e226cc61a9c6e339693d40a8677ebb0e65f867
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
# override the reporter to handle Windows and Python 3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
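# Editor's illustration, not in the original module: on Windows,
# convert_to_native_paths(["sympy/core/tests"]) -> ["sympy\\core\\tests"],
# lower-cased when the filesystem is case-insensitive; on POSIX systems the
# paths pass through unchanged.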
def get_sympy_dir():
"""
Returns the root sympy directory and sets the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if sys.platform != "win32" and not PY3 and os.path.exists("bin/test"):
# run Sage tests; Sage currently doesn't support Windows or Python 3
# Only run Sage tests if 'bin/test' is present (it is missing from
# our release because everything in the 'bin' directory gets
# installed).
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py",
shell=True, cwd=os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
>>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if slow:
# Seed to evenly shuffle slow tests among splits
random.seed(41992450)
random.shuffle(matched)
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/physics/gaussopt.py", # raises deprecation warning
"sympy/galgebra.py", # raises ImportError
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# Use a non-windowed backend, so that the tests work on Travis
import matplotlib
matplotlib.use('Agg')
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use everything past the first 'sympy' as the id
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
# get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
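# Editor's illustration, not in the original module; the file name is
# hypothetical. Doctest a single rst file with the same flags _doctest uses:
# sympytestfile("doc/src/tutorial.rst", module_relative=False,
# encoding='utf-8', optionflags=pdoctest.ELLIPSIS)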
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 0.1
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
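# Editor's illustration, not in the original module: the transform rewrites a
# comparison assert such as
# assert f(x) == g(x)
# into the equivalent of
# _0, _1 = f(x), g(x)
# assert _0 == _1, "\n%s ==\n%s" % (_0, _1)
# so a failing assertion also reports the values that were compared.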
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
except Exception:
reporter.test_exception(sys.exc_info())
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
# we need to keep only those functions that begin with 'test_' and
# are defined in the test file itself or in the file where the
# XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests are generators that yield the actual
# test functions; unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
# default arguments bind func/args now; a bare lambda would close over
# the loop variables and always call the last generated test
fgw = lambda func=func, args=args: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted alphabetically by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
has_dependencies = self._process_dependencies(test.globs['_doctest_depends_on'])
if has_dependencies is not True:
# has_dependencies is either True or a message
self._reporter.test_skip(v="\n" + has_dependencies)
continue
if self._reporter._verbose:
self._reporter.write("\n{} ".format(test.name))
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
# they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) at or below directory
``dir`` whose docstrings will be tested. By default, only files that
have an __init__.py in their parent directory and do not start with
``test_`` are included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
Returns ``True`` if all dependencies are met; otherwise returns a string
describing the first missing dependency, and the test should be skipped.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return "Could not find %s" % ex
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return "Could not import matplotlib"
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return "Could not import %s" % mod
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
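# Editor's illustration, not in the original module: a typical
# ``_doctest_depends_on`` dict, as attached by the ``doctest_depends_on``
# decorator, looks like
# {'exe': ('latex', 'dvipng'), 'modules': ('matplotlib',),
# 'disable_viewers': ('evince', 'gimp')}
# Each key triggers the matching check above; the first missing dependency
# yields a message string, and True means everything is available.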
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
case that it looks like the code comes from a different module.
In the case of decorated functions (e.g. @vectorize) they appear
to come from a different module (e.g. multidimensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests for functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
# Make sure we don't run doctests for functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
# NOTE: this is not the exact line number but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
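# Illustrative sketch (not part of the original module): a minimal example of
# how SymPyDocTestFinder and SymPyDocTestRunner can be combined to run the
# doctests found in a single module. The option flags and the target module
# are placeholder choices, not necessarily what bin/test or bin/doctest use.
def _example_run_doctests_in_module(target_module):
    """Run all doctests found in ``target_module`` and return (failed, tried)."""
    finder = SymPyDocTestFinder()
    runner = SymPyDocTestRunner(verbose=False,
        optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE)
    failed = tried = 0
    for test in finder.find(target_module, globs={}):
        f, t = runner.run(test)
        failed += f
        tried += t
    return failed, tried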
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib, our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
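# Illustrative sketch (not part of the original module): with the checker above,
# outputs that differ only beyond the 1e-5 float tolerance still compare equal,
# and an ellipsis inside an expected float works together with the ELLIPSIS
# option flag. The concrete strings are made-up examples.
def _example_numeric_check():
    checker = SymPyOutputChecker()
    # differs only in the 7th decimal place -> accepted
    assert checker.check_output('1.000001', '1.0000005', 0)
    # ellipsis in the expected float, resolved by the ELLIPSIS option flag
    assert checker.check_output('3.14159...', '3.14159265', pdoctest.ELLIPSIS)
    # genuinely different numbers -> rejected
    assert not checker.check_output('1.0', '2.0', 0)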
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
width = 0  # unparsable columns value; fall back to the default width
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
"""Append mytext, breaking onto a new line if it would exceed the terminal width."""
global text, linelen
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
if self._verbose:
if v is not None:
self.write(message + ' ', "Blue")
else:
self.write(" - ", "Blue")
self.write(char, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
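# Illustrative sketch (not part of the original module): the reporter protocol
# used by the test runner, reduced to its simplest sequence of calls. The file
# name and the "test function" passed to entering_test are placeholders.
def _example_reporter_session():
    reporter = PyTestReporter(verbose=False, tb="short", colors=False)
    reporter.root_dir(sympy_dir)
    reporter.start(seed=None, msg="example test process starts")
    reporter.entering_filename(os.path.join(sympy_dir, "example_test.py"), 1)
    reporter.entering_test(_example_reporter_session)  # any callable with __name__
    reporter.test_pass()
    reporter.leaving_filename()
    return reporter.finish()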
|
ChristinaZografou/sympy
|
sympy/utilities/runtests.py
|
Python
|
bsd-3-clause
| 82,022
|
[
"VisIt"
] |
0d76da0d99f8725281a65afa6194d7517feecb8d75389043a19c99345256f535
|
# Copyright (c) 2012, CyberPoint International, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CyberPoint International, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
A module for creating and managing node data. Node data in this library can have many types, dependent on whether the conditional probability distributions are discrete, linear Gaussian, or hybrid, and on whether the Bayesian network is static or dynamic. For example input files, see :doc:`unittestdict`, :doc:`unittesthdict`, :doc:`unittestlgdict`, and :doc:`unittestdyndict`.
'''
from .dictionary import Dictionary
class NodeData(Dictionary):
'''
This class represents the node data for each node in a graph. If the Bayesian network is static, it contains the attribute *Vdata*. If the Bayesian network is dynamic, it contains two attributes, *initial_Vdata* and *twotbn_Vdata*. If the Bayesian network has hybrid CPDs, it contains the additional attribute *nodes*.
'''
def __init__(self):
self.Vdata = None
'''A dictionary of node data.'''
self.initial_Vdata = None
'''In dynamic graphs, a dictionary containing node data for the initial time interval.'''
self.twotbn_Vdata = None
'''In dynamic graphs, a dictionary containing node data for every time step interval after the first one.'''
self.nodes = None
'''In hybrid graphs, a dictionary of {key:value} pairs linking the name of each node (the key) to a class instance (the value) which represents the node, its data, and its sampling function.'''
def load(self, path):
'''
Load node data from an input file located at *path*. Input file must be a plaintext .txt file with a JSON-style representation of a dict. The dict must have the top-level key ``Vdata`` or two top-level keys, ``initial_Vdata`` and ``twotbn_Vdata``. For example::
{
"Vdata": {
"<vertex 1>": <dict containing vertex 1 data>,
...
"<vertex n>": <dict containing vertex n data>
}
}
or::
{
"initial_Vdata": {
"<vertex 1>": <dict containing vertex 1 data>,
...
"<vertex n>": <dict containing vertex n data>
}
"twotbn_Vdata": {
"<vertex 1>": <dict containing vertex 1 data>,
...
"<vertex n>": <dict containing vertex n data>
}
}
The function takes the following arguments:
1. *path* -- The path to the text file that contains input data (e.g., "mydictionary.txt")
In the static case, it modifies *Vdata* to hold the dictionary found at path. In the dynamic case, it modifies the *initial_Vdata* and *twotbn_Vdata* attributes to hold the dictionaries found at path.
'''
self.dictload(path)
# try to load both for normal and dynamic cases
try:
self.Vdata = self.alldata["Vdata"]
except KeyError:
try:
self.initial_Vdata = self.alldata["initial_Vdata"]
self.twotbn_Vdata = self.alldata["twotbn_Vdata"]
except KeyError:
print("Error: NodeData did not recognize input file format.")
# free unused memory
del self.alldata
def entriestoinstances(self):
'''
For each node, convert dictionary entry to class instance.
This method is used only when dealing with Hybrid Bayesian networks as found in the :doc:`hybayesiannetwork` module.
The type of the node must be located in the 'type' attribute of the node's dictionary entry. To see an example of such a dictionary, see :doc:`unittesthdict`. This type is used to instantiate a corresponding class from libpgm/CPDtypes/, and store the node's dictionary info in that class. Thus we lose none of the dictionary data, yet we gain the ability to use the instantiated class's built-in function to choose its own outcome based on the outcomes of its parents.
In order for this method to be called, the self.Vdata attribute must have dictionary entries of the following form::
<vertex name>: {
'type': <type of node -- must correspond to module in /CPDtypes>,
'parents': <array of parents of node>,
'children': <array of children of node>,
<whatever further entries are required by the type*>
}
For instance, type "discrete" requires a "cprob" entry, while type "lg"
requires "mean_base", "mean_scal", and "variance" entries.
The function draws on the data in the *Vdata* attribute, and instantiates the attribute *nodes*, which is a dictionary of {name: instance} pairs where 'name' is the name of the node and 'instance' is a class instance containing the node data and the proper sampling function.
'''
# declare result dict
rarray = dict()
# transform into class instances
for entry in self.Vdata.keys():
# import module containing class
path = str(self.Vdata[entry]["type"])
exec("from libpgm.CPDtypes import " + path)
# instantiate class
exec("tmpnode = " + path + "." + str.capitalize(path) + "(self.Vdata[entry])")
# store the instance in the result dict under the node's name
exec("rarray['" + str(entry) + "'] = tmpnode")
self.nodes = rarray
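# Illustrative sketch (not part of the original module): typical use of
# NodeData with a static network, following the input format described in
# load() above. The file name below is a placeholder.
#
# nd = NodeData()
# nd.load("unittestdict.txt")     # any JSON-style dict with a top-level "Vdata" key
# print(nd.Vdata.keys())          # the vertex names
#
# For hybrid networks the entries can then be turned into CPD class instances:
#
# nd.entriestoinstances()
# print(nd.nodes)                 # {vertex name: CPD class instance}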
|
CyberPoint/libpgm
|
libpgm/nodedata.py
|
Python
|
bsd-3-clause
| 7,041
|
[
"Gaussian"
] |
1d1c5195f77606b774ad3835db2a6fd7582a85d9068b69b550f2f28098c6c997
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# ssh - remote command wrappers using ssh/scp
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""SSH based remote operations"""
import base64
import os
import paramiko
import tempfile
import StringIO
from shared.conf import get_resource_exe, get_configuration_object
def parse_pub_key(public_key):
"""Parse public_key string to paramiko key.
Throws exception if key is broken.
"""
public_key_elms = public_key.split(' ')
# Either we have 'from' or 'ssh-' as first element
if len(public_key_elms) > 0 and \
public_key_elms[0].startswith('ssh-'):
ssh_type_idx = 0
elif len(public_key_elms) > 1 and \
public_key_elms[1].startswith('ssh-'):
ssh_type_idx = 1
else:
msg = 'Invalid ssh public key: (%s)' % public_key
raise ValueError(msg)
head, tail = public_key.split(' ')[ssh_type_idx:2+ssh_type_idx]
bits = base64.decodestring(tail)
msg = paramiko.Message(bits)
if head == 'ssh-rsa':
parse_key = paramiko.RSAKey
elif head == 'ssh-dss':
parse_key = paramiko.DSSKey
else:
# Try RSA for unknown key types
parse_key = paramiko.RSAKey
return parse_key(msg)
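# Illustrative sketch (not part of the original module): parse_pub_key raises
# on malformed keys, so callers validating user supplied keys typically wrap
# it. The helper below is only an example; the key line is a placeholder.
def _example_validate_pub_key(key_line):
    """Return the paramiko key type name, or None if the key cannot be parsed."""
    try:
        return parse_pub_key(key_line).get_name()
    except Exception:
        return None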
def default_ssh_options():
"""Default list of options for ssh connections"""
options = []
options.append('-o BatchMode=yes')
# We need fault tolerance but can't block e.g. grid_script for long
options.append('-o ConnectionAttempts=2')
options.append('-o ConnectTimeout=10')
return options
def copy_file_to_resource(
filename,
dest_path,
resource_config,
logger,
):
"""Copy filename to dest_path relative to resource home on resource
using scp.
"""
configuration = get_configuration_object()
multiplex = '0'
if resource_config.has_key('SSHMULTIPLEX'):
multiplex = str(resource_config['SSHMULTIPLEX'])
hostkey = resource_config['HOSTKEY']
host = resource_config['HOSTURL']
identifier = resource_config['HOSTIDENTIFIER']
unique_id = '%s.%s' % (host, identifier)
res_dir = os.path.join(configuration.resource_home, unique_id)
port = resource_config['SSHPORT']
user = resource_config['MIGUSER']
if dest_path.startswith(os.sep):
logger.warning('copy_file_to_resource: force relative dest path!'
)
dest_path = dest_path.lstrip(os.sep)
# create known-hosts file with only the resources hostkey (could
# this be avoided and just passed as an argument?)
try:
# Securely open a temporary file in resource dir
# Please note that mkstemp uses os.open() style rather
# than open()
(filehandle, key_path) = tempfile.mkstemp(dir=res_dir,
text=True)
os.write(filehandle, hostkey)
os.close(filehandle)
logger.debug('single_known_hosts for %s written in %s' % (host,
key_path))
logger.debug('value %s' % hostkey)
except Exception, err:
logger.error('could not write single_known_hosts %s (%s)'
% (host, err))
options = default_ssh_options()
if '0' != multiplex:
options.append('-o ControlPath=%s/ssh-multiplexing' % res_dir)
options.append('-o Port=%s' % port)
options.append('-o StrictHostKeyChecking=yes')
options.append('-o CheckHostIP=yes')
if hostkey:
options.append('-o UserKnownHostsFile=' + key_path)
command = 'scp %s %s %s@%s:%s >> /dev/null 2>> %s/scp.err' % (
' '.join(options),
filename,
user,
host,
os.path.join(resource_config['RESOURCEHOME'], dest_path),
configuration.log_dir,
)
logger.debug(command)
status = os.system(command) >> 8
# Remove temp file no matter what command returned
try:
os.remove(key_path)
except Exception, err:
logger.error('could not remove %s (%s)' % (key_path, err))
if status != 0:
# File was not sent!! Take action
logger.error(command)
logger.error('scp return code: %s %s' % (status, host))
return False
logger.info('scp ok %s' % host)
return True
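# Illustrative sketch (not part of the original module): the resource_config
# fields copy_file_to_resource relies on; all values below are placeholders.
#
# resource_config = {
#     'HOSTURL': 'resource.example.org',
#     'HOSTIDENTIFIER': '0',
#     'HOSTKEY': 'resource.example.org ssh-rsa AAAA...',
#     'SSHPORT': 22,
#     'MIGUSER': 'mig',
#     'RESOURCEHOME': '/home/mig/resource_home',
#     'SSHMULTIPLEX': '0',        # optional, enables a shared control socket
# }
# copy_file_to_resource('job.sh', 'job_dir/job.sh', resource_config, logger)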
def copy_file_to_exe(
local_filename,
dest_path,
resource_config,
exe_name,
logger,
):
"""Copy local_filename to dest_path relative to execution_dir on
exe_name. This needs to go through the resource front end using scp
and the copy method to the exe depends on the shared fs setting.
"""
msg = ''
unique_resource_name = resource_config['HOSTURL'] + '.'\
+ resource_config['HOSTIDENTIFIER']
(status, exe) = get_resource_exe(resource_config, exe_name, logger)
if not status:
msg = "No EXE config for: '" + unique_resource_name + "' EXE: '"\
+ exe_name + "'"
return (False, msg)
if dest_path.startswith(os.sep):
logger.warning('copy_file_to_exe: force relative dest path!')
dest_path = dest_path.lstrip(os.sep)
# copy file to frontend
copy_attempts = 3
for attempt in range(copy_attempts):
copy_status = copy_file_to_resource(local_filename, dest_path,
resource_config, logger)
if not copy_status:
logger.warning('scp of file failed in attempt %d of %d'
% (attempt, copy_attempts))
else:
break
# Remove temporary file no matter what scp returned
try:
os.remove(local_filename)
except Exception, err:
logger.error('Could not remove %s (%s)' % (local_filename, err))
if copy_status:
msg += 'scp of file was successful!\n'
logger.info('scp of file was successful!')
else:
msg += 'scp of file was NOT successful!\n'
logger.error('scp of file was NOT successful!')
return (False, msg)
# copy file to exe
if exe.has_key('shared_fs') and exe['shared_fs']:
ssh_command = 'cp '\
+ os.path.join(resource_config['RESOURCEHOME'], dest_path)\
+ ' ' + exe['execution_dir']
else:
# We do not have exe host keys and don't really care about auth there
ssh_command = 'scp %s %s %s@%s:%s'\
% (' '.join(default_ssh_options()),
os.path.join(resource_config['RESOURCEHOME'],
dest_path), exe['execution_user'], exe['execution_node'
], exe['execution_dir'])
copy_attempts = 3
for attempt in range(copy_attempts):
(status, executed_command) = execute_on_resource(ssh_command,
False, resource_config, logger)
if status != 0:
logger.warning('copy of file to exe failed (%d) in attempt %d of %d'
% (status, attempt, copy_attempts))
else:
break
msg += executed_command + '\n'
if 0 != status:
logger.error('file not copied to exe!')
msg += 'file not copied to exe!\n'
return (False, msg)
else:
logger.info('file copied to exe')
msg += 'file copied to exe\n'
return (True, '')
def execute_on_resource(
command,
background,
resource_config,
logger,
):
"""Execute command on resource"""
configuration = get_configuration_object()
hostkey = resource_config['HOSTKEY']
host = resource_config['HOSTURL']
port = resource_config['SSHPORT']
user = resource_config['MIGUSER']
job_type = 'batch'
if resource_config.has_key('JOBTYPE'):
job_type = resource_config['JOBTYPE']
multiplex = '0'
if resource_config.has_key('SSHMULTIPLEX'):
multiplex = str(resource_config['SSHMULTIPLEX'])
# Use manually added SSHMULTIPLEXMASTER variable to only run master
# from sessions initiated by grid_sshmux.py: There's a race in the
# handling of ControlMaster=auto in openssh-4.3 resulting in error:
# ControlSocket $SOCKET already exists
# (see http://article.gmane.org/gmane.network.openssh.devel/13839)
multiplex_master = False
if resource_config.has_key('SSHMULTIPLEXMASTER'):
multiplex_master = bool(resource_config['SSHMULTIPLEXMASTER'])
identifier = resource_config['HOSTIDENTIFIER']
unique_id = '%s.%s' % (host, identifier)
res_dir = os.path.join(configuration.resource_home, unique_id)
# fname should be unique to avoid race conditions, since several
# cgi-scripts may run at the same time due to a multi process
# or multi thread web server
try:
# Securely open a temporary file in resource dir
# Please note that mkstemp uses os.open() style rather
# than open()
(filehandle, key_path) = tempfile.mkstemp(dir=res_dir,
text=True)
os.write(filehandle, hostkey)
os.close(filehandle)
logger.debug('wrote hostkey %s to %s' % (hostkey, key_path))
except Exception, err:
logger.error('could not write tmp host key file (%s)' % err)
return (-1, '')
options = default_ssh_options()
# Only enable X forwarding for interactive resources (i.e. job_type
# 'interactive' or 'all')
if 'batch' != job_type.lower():
options.append('-X')
options.append('-o Port=%s' % port)
options.append('-o CheckHostIP=yes')
options.append('-o StrictHostKeyChecking=yes')
if hostkey:
options.append('-o UserKnownHostsFile=%s' % key_path)
if '0' != multiplex:
options.append('-o ControlPath=%s/ssh-multiplexing' % res_dir)
# Only open a new control socket if explicitly told so:
# All other invocations will reuse it if possible.
if multiplex_master:
options.append('-o ControlMaster=yes')
batch = []
batch.append('1> /dev/null')
batch.append('2> /dev/null')
if background:
batch.append('&')
# IMPORTANT: careful with the ssh_command line!
# removing explicit bash or changing quotes breaks resource management
ssh_command = 'ssh %s %s@%s "bash -c \'%s %s\'"'\
% (' '.join(options), user, host, command, ' '.join(batch))
logger.debug('running command: %s' % ssh_command)
status = os.system(ssh_command) >> 8
logger.debug('cleaning up after command')
# Remove temp file no matter what ssh command returned
try:
os.remove(key_path)
except Exception, err:
logger.error('Could not remove hostkey file %s: %s'
% (key_path, err))
if 0 != status:
# Command was not executed with return code 0!! Take action
logger.error('%s EXITED WITH STATUS: %s' % (ssh_command,
status))
return (status, ssh_command)
logger.debug('Remote execution ok: %s' % ssh_command)
return (status, ssh_command)
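# Illustrative note (not part of the original module): for a resource_config
# like the placeholder one sketched above, the command line built here has the
# shape
#
#   ssh -o BatchMode=yes -o ConnectionAttempts=2 -o ConnectTimeout=10 \
#       -o Port=22 -o CheckHostIP=yes -o StrictHostKeyChecking=yes \
#       -o UserKnownHostsFile=/path/to/tmp_hostkey \
#       mig@resource.example.org "bash -c 'some_command 1> /dev/null 2> /dev/null'"
#
# with '&' appended inside the quotes when background is True.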
def execute_on_exe(
command,
background,
resource_config,
exe_config,
logger,
):
"""Execute command (through resource) on exe"""
node = exe_config['execution_node']
user = exe_config['execution_user']
options = default_ssh_options()
options.append('-X')
# This command should already be properly escaped by the apostrophes
# in the execute_on_resource call
ssh_command = "ssh %s %s@%s %s" % (' '.join(options), user,
node, command)
logger.debug(ssh_command)
return execute_on_resource(ssh_command, background,
resource_config, logger)
def execute_on_store(
command,
background,
resource_config,
store_config,
logger,
):
"""Execute command (through resource) on store"""
node = store_config['storage_node']
user = store_config['storage_user']
options = default_ssh_options()
options.append('-X')
# This command should already be properly escaped by the apostrophes
# in the execute_on_resource call
ssh_command = "ssh %s %s@%s %s" % (' '.join(options), user,
node, command)
logger.debug(ssh_command)
return execute_on_resource(ssh_command, background,
resource_config, logger)
def execute_remote_ssh(
remote_port,
remote_hostkey,
remote_username,
remote_hostname,
ssh_command,
logger,
ssh_background,
resource_dir='/tmp',
):
"""Wrap old style ssh calls to use new version"""
resource_config = {
'SSHPORT': remote_port,
'HOSTKEY': remote_hostkey,
'MIGUSER': remote_username,
'HOSTURL': remote_hostname,
}
return execute_on_resource(ssh_command, ssh_background,
resource_config, logger)
def generate_ssh_rsa_key_pair(size=2048, public_key_prefix='', public_key_postfix=''):
"""Generates ssh rsa key pair"""
rsa_key = paramiko.RSAKey.generate(size)
string_io_obj = StringIO.StringIO()
rsa_key.write_private_key(string_io_obj)
private_key = string_io_obj.getvalue()
public_key = ("%s ssh-rsa %s %s" % (public_key_prefix, rsa_key.get_base64(), public_key_postfix)).strip()
return (private_key, public_key)
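# Illustrative sketch (not part of the original module): generating a key pair
# and prefixing the public key with ssh options, which is what the prefix and
# postfix arguments are for. The option string and comment are placeholders.
#
# priv, pub = generate_ssh_rsa_key_pair(
#     size=2048,
#     public_key_prefix='no-pty,no-X11-forwarding',
#     public_key_postfix='generated-by-mig')
# # priv holds the PEM private key, pub a single authorized_keys style line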
|
heromod/migrid
|
mig/shared/ssh.py
|
Python
|
gpl-2.0
| 13,910
|
[
"Brian"
] |
0401efa85e799c95071b6cbfb21bae770c81b8f2964469c93ae28c96b79d2409
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('visit', '0029_caregiver_key'),
]
operations = [
migrations.AlterField(
model_name='caregiver',
name='key',
field=models.UUIDField(default=uuid.uuid4, unique=True, editable=False),
),
]
|
koebbe/homeworks
|
visit/migrations/0030_auto_20150614_1814.py
|
Python
|
mit
| 441
|
[
"VisIt"
] |
551e61ebbf67e90f7280e88e852109feda384eb9b7093907fff806d74f4eee9e
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import csv
import logging
import math
import os
import re
import sys
# Append mugqic_pipelines directory to Python library path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))))
# MUGQIC Modules
from core.config import *
from core.job import *
from core.pipeline import *
from bfx.design import *
from bfx import gq_seq_utils
from bfx import picard
from bfx import samtools
from pipelines.dnaseq import dnaseq
log = logging.getLogger(__name__)
class ChipSeq(dnaseq.DnaSeq):
"""
ChIP-Seq Pipeline
=================
ChIP-Seq experiments allow the isolation and sequencing of genomic DNA bound by a specific transcription factor,
covalently modified histone, or other nuclear protein. The pipeline starts by trimming adaptors and
low quality bases and mapping the reads (single end or paired end) to a reference genome using bwa.
Reads are filtered by mapping quality and duplicate reads are marked. Then, Homer quality control routines
are used to provide information and feedback about the quality of the experiment. Peak calling is performed by MACS,
and annotation and motif discovery for narrow peaks are performed using Homer. Statistics of annotated peaks
are produced for narrow peaks and a standard report is generated.
An example of the ChIP-Seq report for an analysis on public ENCODE data is available for illustration purpose only:
[ChIP-Seq report](http://gqinnovationcenter.com/services/bioinformatics/tools/chipReport/index.html).
[Here](https://bitbucket.org/mugqic/mugqic_pipelines/downloads/MUGQIC_Bioinfo_ChIP-Seq.pptx)
is more information about ChIP-Seq pipeline that you may find interesting.
"""
def __init__(self):
# Add pipeline specific arguments
self.argparser.add_argument("-d", "--design", help="design file", type=file)
super(ChipSeq, self).__init__()
@property
def contrasts(self):
contrasts = super(ChipSeq, self).contrasts
# Parse contrasts to retrieve name and type
for contrast in contrasts:
if re.search("^\w[\w.-]*,[BN]$", contrast.name):
contrast.real_name = contrast.name.split(",")[0]
if contrast.name.split(",")[1] == 'B':
contrast.type = 'broad'
elif contrast.name.split(",")[1] == 'N':
contrast.type = 'narrow'
else:
raise Exception("Error: contrast name \"" + contrast.name + "\" is invalid (should be <contrast>,B for broad or <contrast>,N for narrow)!")
return contrasts
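    # Illustrative note (not part of the original pipeline): with the naming
    # convention above, a design contrast called "H3K27ac,N" is exposed as
    # real_name "H3K27ac" with type "narrow", and "H3K9me3,B" as real_name
    # "H3K9me3" with type "broad"; any other name raises the Exception above.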
def mappable_genome_size(self):
genome_index = csv.reader(open(config.param('DEFAULT', 'genome_fasta', type='filepath') + ".fai", 'rb'), delimiter='\t')
# 2nd column of genome index contains chromosome length
# HOMER and MACS2 mappable genome size (without repetitive features) is about 80 % of total size
return sum([int(chromosome[1]) for chromosome in genome_index]) * 0.8
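    # Illustrative note (not part of the original pipeline): if the chromosome
    # lengths in the .fai index sum to 3.1e9 bp, this returns 3.1e9 * 0.8 =
    # 2.48e9, which is the value later passed to MACS2 as --gsize.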
def samtools_view_filter(self):
"""
Filter unique reads by mapping quality using [Samtools](http://www.htslib.org/).
"""
jobs = []
for readset in self.readsets:
readset_bam_prefix = os.path.join("alignment", readset.sample.name, readset.name, readset.name + ".sorted.")
readset_bam = readset_bam_prefix + "bam"
filtered_readset_bam = readset_bam_prefix + "filtered.bam"
job = samtools.view(readset_bam, filtered_readset_bam, "-b -F4 -q " + str(config.param('samtools_view_filter', 'min_mapq', type='int')))
job.name = "samtools_view_filter." + readset.name
jobs.append(job)
report_file = os.path.join("report", "ChipSeq.samtools_view_filter.md")
jobs.append(
Job(
[os.path.join("alignment", readset.sample.name, readset.name, readset.name + ".sorted.filtered.bam") for readset in self.readsets],
[report_file],
[['samtools_view_filter', 'module_pandoc']],
command="""\
mkdir -p report && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable min_mapq="{min_mapq}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
min_mapq=config.param('samtools_view_filter', 'min_mapq', type='int'),
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="samtools_view_filter_report")
)
return jobs
def picard_merge_sam_files(self):
"""
BAM readset files are merged into one file per sample. Merge is done using [Picard](http://broadinstitute.github.io/picard/).
This step takes as input files:
1. Aligned and sorted BAM output files from previous bwa_mem_picard_sort_sam step if available
2. Else, BAM files from the readset file
"""
jobs = []
for sample in self.samples:
alignment_directory = os.path.join("alignment", sample.name)
# Find input readset BAMs first from previous bwa_mem_picard_sort_sam job, then from original BAMs in the readset sheet.
readset_bams = [os.path.join(alignment_directory, readset.name, readset.name + ".sorted.filtered.bam") for readset in sample.readsets]
sample_bam = os.path.join(alignment_directory, sample.name + ".merged.bam")
mkdir_job = Job(command="mkdir -p " + os.path.dirname(sample_bam))
# If this sample has one readset only, create a sample BAM symlink to the readset BAM, along with its index.
if len(sample.readsets) == 1:
readset_bam = readset_bams[0]
if os.path.isabs(readset_bam):
target_readset_bam = readset_bam
else:
target_readset_bam = os.path.relpath(readset_bam, alignment_directory)
job = concat_jobs([
mkdir_job,
Job([readset_bam], [sample_bam], command="ln -s -f " + target_readset_bam + " " + sample_bam, removable_files=[sample_bam]),
], name="symlink_readset_sample_bam." + sample.name)
elif len(sample.readsets) > 1:
job = concat_jobs([
mkdir_job,
picard.merge_sam_files(readset_bams, sample_bam)
])
job.name = "picard_merge_sam_files." + sample.name
jobs.append(job)
return jobs
def picard_mark_duplicates(self):
"""
Mark duplicates. Aligned reads per sample are duplicates if they have the same 5' alignment positions
(for both mates in the case of paired-end reads). All but the best pair (based on alignment score)
will be marked as a duplicate in the BAM file. Marking duplicates is done using [Picard](http://broadinstitute.github.io/picard/).
"""
jobs = []
for sample in self.samples:
alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".")
input = alignment_file_prefix + "merged.bam"
output = alignment_file_prefix + "sorted.dup.bam"
metrics_file = alignment_file_prefix + "sorted.dup.metrics"
job = picard.mark_duplicates([input], output, metrics_file)
job.name = "picard_mark_duplicates." + sample.name
jobs.append(job)
report_file = os.path.join("report", "ChipSeq.picard_mark_duplicates.md")
jobs.append(
Job(
[os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam") for sample in self.samples],
[report_file],
command="""\
mkdir -p report && \\
cp \\
{report_template_dir}/{basename_report_file} \\
{report_file}""".format(
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="picard_mark_duplicates_report")
)
return jobs
def metrics(self):
"""
The numbers of raw, filtered and aligned reads per sample are computed at this stage.
"""
jobs = []
jobs.append(concat_jobs([samtools.flagstat(os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam"), os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam.flagstat")) for sample in self.samples], name="metrics.flagstat"))
trim_metrics_file = os.path.join("metrics", "trimSampleTable.tsv")
metrics_file = os.path.join("metrics", "SampleMetrics.stats")
report_metrics_file = os.path.join("report", "trimMemSampleTable.tsv")
report_file = os.path.join("report", "ChipSeq.metrics.md")
jobs.append(
Job(
[os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam.flagstat") for sample in self.samples],
[report_metrics_file],
[['metrics', 'module_pandoc']],
# Retrieve number of aligned and duplicate reads from sample flagstat files
# Merge trimming stats per sample with aligned and duplicate stats using ugly awk
# Format merge stats into markdown table using ugly awk (knitr may do this better)
command="""\
for sample in {samples}
do
flagstat_file=alignment/$sample/$sample.sorted.dup.bam.flagstat
echo -e "$sample\t`grep -P '^\d+ \+ \d+ mapped' $flagstat_file | grep -Po '^\d+'`\t`grep -P '^\d+ \+ \d+ duplicate' $flagstat_file | grep -Po '^\d+'`"
done | \\
awk -F"\t" '{{OFS="\t"; print $0, $3 / $2 * 100}}' | sed '1iSample\tAligned Filtered Reads\tDuplicate Reads\tDuplicate %' \\
> {metrics_file} && \\
mkdir -p report && \\
if [[ -f {trim_metrics_file} ]]
then
awk -F "\t" 'FNR==NR{{trim_line[$1]=$0; surviving[$1]=$3; next}}{{OFS="\t"; if ($1=="Sample") {{print trim_line[$1], $2, "Aligned Filtered %", $3, $4}} else {{print trim_line[$1], $2, $2 / surviving[$1] * 100, $3, $4}}}}' {trim_metrics_file} {metrics_file} \\
> {report_metrics_file}
else
cp {metrics_file} {report_metrics_file}
fi && \\
trim_mem_sample_table=`if [[ -f {trim_metrics_file} ]] ; then LC_NUMERIC=en_CA awk -F "\t" '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----:|-----:|-----:|-----:|-----:|-----:|-----:"}} else {{print $1, sprintf("%\\47d", $2), sprintf("%\\47d", $3), sprintf("%.1f", $4), sprintf("%\\47d", $5), sprintf("%.1f", $6), sprintf("%\\47d", $7), sprintf("%.1f", $8)}}}}' {report_metrics_file} ; else LC_NUMERIC=en_CA awk -F "\t" '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----:|-----:|-----:"}} else {{print $1, sprintf("%\\47d", $2), sprintf("%\\47d", $3), sprintf("%.1f", $4)}}}}' {report_metrics_file} ; fi` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable trim_mem_sample_table="$trim_mem_sample_table" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}
""".format(
samples=" ".join([sample.name for sample in self.samples]),
trim_metrics_file=trim_metrics_file,
metrics_file=metrics_file,
report_metrics_file=report_metrics_file,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
name="metrics_report",
removable_files=[report_metrics_file],
report_files=[report_file]
)
)
return jobs
def homer_make_tag_directory(self):
"""
The Homer Tag directories, used to check for quality metrics, are computed at this step.
"""
jobs = []
for sample in self.samples:
alignment_file = os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam")
output_dir = os.path.join("tags", sample.name)
jobs.append(Job(
[alignment_file],
[os.path.join(output_dir, "tagInfo.txt")],
[['homer_make_tag_directory', 'module_samtools'], ['homer_make_tag_directory', 'module_homer']],
command="""\
makeTagDirectory \\
{output_dir} \\
{alignment_file} \\
-checkGC -genome {genome}""".format(
output_dir=output_dir,
alignment_file=alignment_file,
genome=config.param('homer_make_tag_directory', 'assembly')
),
name="homer_make_tag_directory." + sample.name,
removable_files=[output_dir]
))
return jobs
def qc_metrics(self):
"""
Sequencing quality metrics such as tag count, tag autocorrelation, sequence bias and GC bias are generated.
"""
# If --design <design_file> option is missing, self.contrasts call will raise an Exception
if self.contrasts:
design_file = os.path.relpath(self.args.design.name, self.output_dir)
report_file = os.path.join("report", "ChipSeq.qc_metrics.md")
output_files = [os.path.join("graphs", sample.name + "_QC_Metrics.ps") for sample in self.samples] + [report_file]
return [Job(
[os.path.join("tags", sample.name, "tagInfo.txt") for sample in self.samples],
output_files,
[
['qc_plots_R', 'module_mugqic_tools'],
['qc_plots_R', 'module_R']
],
command="""\
mkdir -p graphs && \\
Rscript $R_TOOLS/chipSeqGenerateQCMetrics.R \\
{design_file} \\
{output_dir} && \\
cp {report_template_dir}/{basename_report_file} {report_file} && \\
for sample in {samples}
do
cp --parents graphs/${{sample}}_QC_Metrics.ps report/
convert -rotate 90 graphs/${{sample}}_QC_Metrics.ps report/graphs/${{sample}}_QC_Metrics.png
echo -e "----\n\n![${{sample}} QC Metrics](graphs/${{sample}}_QC_Metrics.png)\n" \\
>> {report_file}
done""".format(
samples=" ".join([sample.name for sample in self.samples]),
design_file=design_file,
output_dir=self.output_dir,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
name="qc_plots_R",
removable_files=output_files,
report_files=[report_file]
)]
def homer_make_ucsc_file(self):
"""
Wiggle Track Format files are generated from the aligned reads using Homer.
The resulting files can be loaded in browsers like IGV or UCSC.
"""
jobs = []
for sample in self.samples:
tag_dir = os.path.join("tags", sample.name)
bedgraph_dir = os.path.join("tracks", sample.name)
bedgraph_file = os.path.join(bedgraph_dir, sample.name + ".ucsc.bedGraph.gz")
jobs.append(Job(
[os.path.join(tag_dir, "tagInfo.txt")],
[bedgraph_file],
[['homer_make_ucsc_files', 'module_homer']],
command="""\
mkdir -p {bedgraph_dir} && \\
makeUCSCfile \\
{tag_dir} | \\
gzip -1 -c > {bedgraph_file}""".format(
tag_dir=tag_dir,
bedgraph_dir=bedgraph_dir,
bedgraph_file=bedgraph_file
),
name="homer_make_ucsc_file." + sample.name,
removable_files=[bedgraph_dir]
))
report_file = os.path.join("report", "ChipSeq.homer_make_ucsc_file.md")
jobs.append(
Job(
[os.path.join("tracks", sample.name, sample.name + ".ucsc.bedGraph.gz")],
[report_file],
command="""\
mkdir -p report && \\
zip -r report/tracks.zip tracks/*/*.ucsc.bedGraph.gz && \\
cp {report_template_dir}/{basename_report_file} report/""".format(
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="homer_make_ucsc_file_report")
)
return jobs
def macs2_callpeak(self):
"""
Peaks are called using the MACS2 software. Different calling strategies are used for narrow and broad peaks.
The mfold parameter used in the model building step is estimated from a peak enrichment diagnosis run.
The estimated mfold lower bound is 10 and the estimated upper bound can vary between 15 and 100.
The default mfold parameter of MACS2 is [10,30].
"""
jobs = []
for contrast in self.contrasts:
if contrast.treatments:
treatment_files = [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam") for sample in contrast.treatments]
control_files = [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam") for sample in contrast.controls]
output_dir = os.path.join("peak_call", contrast.real_name)
if contrast.type == 'broad': # Broad region
other_options = " --broad --nomodel"
else: # Narrow region
if control_files:
other_options = " --nomodel"
else:
other_options = " --fix-bimodal"
jobs.append(Job(
treatment_files + control_files,
[os.path.join(output_dir, contrast.real_name + "_peaks." + contrast.type + "Peak")],
[['macs2_callpeak', 'module_python'], ['macs2_callpeak', 'module_macs2']],
command="""\
mkdir -p {output_dir} && \\
macs2 callpeak {format}{other_options} \\
--gsize {genome_size} \\
--treatment \\
{treatment_files}{control_files} \\
--name {output_prefix_name} \\
>& {output_prefix_name}.diag.macs.out""".format(
output_dir=output_dir,
format="--format " + ("BAMPE" if self.run_type == "PAIRED_END" else "BAM"),
other_options=other_options,
genome_size=self.mappable_genome_size(),
treatment_files=" \\\n ".join(treatment_files),
control_files=" \\\n --control \\\n " + " \\\n ".join(control_files) if control_files else " \\\n --nolambda",
output_prefix_name=os.path.join(output_dir, contrast.real_name)
),
name="macs2_callpeak." + contrast.real_name,
removable_files=[output_dir]
))
else:
log.warning("No treatment found for contrast " + contrast.name + "... skipping")
report_file = os.path.join("report", "ChipSeq.macs2_callpeak.md")
jobs.append(
Job(
[os.path.join("peak_call", contrast.real_name, contrast.real_name + "_peaks." + contrast.type + "Peak") for contrast in self.contrasts],
[report_file],
command="""\
mkdir -p report && \\
cp {report_template_dir}/{basename_report_file} report/ && \\
for contrast in {contrasts}
do
cp -a --parents peak_call/$contrast/ report/ && \\
echo -e "* [Peak Calls File for Design $contrast](peak_call/$contrast/${{contrast}}_peaks.xls)" \\
>> {report_file}
done""".format(
contrasts=" ".join([contrast.real_name for contrast in self.contrasts]),
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="macs2_callpeak_report")
)
return jobs
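    # Illustrative example (not part of the original pipeline) of the command
    # this step generates for a narrow, single-end contrast with one control;
    # sample and design names, and the genome size, are placeholders:
    #
    #   mkdir -p peak_call/MyDesign && \
    #   macs2 callpeak --format BAM --nomodel \
    #     --gsize 2480000000.0 \
    #     --treatment \
    #     alignment/chip1/chip1.sorted.dup.bam \
    #     --control \
    #     alignment/input1/input1.sorted.dup.bam \
    #     --name peak_call/MyDesign/MyDesign \
    #     >& peak_call/MyDesign/MyDesign.diag.macs.out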
def homer_annotate_peaks(self):
"""
The peaks called previously are annotated with HOMER using RefSeq annotations for the reference genome.
Gene ontology and genome ontology analysis are also performed at this stage.
"""
jobs = []
for contrast in self.contrasts:
if contrast.treatments:
peak_file = os.path.join("peak_call", contrast.real_name, contrast.real_name + "_peaks." + contrast.type + "Peak")
output_prefix = os.path.join("annotation", contrast.real_name, contrast.real_name)
annotation_file = output_prefix + ".annotated.csv"
jobs.append(concat_jobs([
Job(command="mkdir -p " + output_prefix),
Job(
[peak_file],
[
annotation_file,
os.path.join(output_prefix, "geneOntology.html"),
os.path.join(output_prefix, "GenomeOntology.html")
],
[['homer_annotate_peaks', 'module_perl'], ['homer_annotate_peaks', 'module_homer']],
command="""\
annotatePeaks.pl \\
{peak_file} \\
{genome} \\
-gsize {genome} \\
-cons -CpG \\
-go {output_prefix} \\
-genomeOntology {output_prefix} \\
> {annotation_file}""".format(
peak_file=peak_file,
genome=config.param('homer_annotate_peaks', 'assembly'),
genome_size=config.param('homer_annotate_peaks', 'assembly'),
output_prefix=output_prefix,
annotation_file=annotation_file
)
),
Job(
[annotation_file],
[
output_prefix + ".tss.stats.csv",
output_prefix + ".exon.stats.csv",
output_prefix + ".intron.stats.csv",
output_prefix + ".tss.distance.csv"
],
[['homer_annotate_peaks', 'module_perl'], ['homer_annotate_peaks', 'module_mugqic_tools']],
command="""\
perl -MReadMetrics -e 'ReadMetrics::parseHomerAnnotations(
"{annotation_file}",
"{output_prefix}",
{proximal_distance},
{distal_distance},
{distance5d_lower},
{distance5d_upper},
{gene_desert_size}
)'""".format(
annotation_file=annotation_file,
output_prefix=output_prefix,
proximal_distance=config.param('homer_annotate_peaks', 'proximal_distance', type='int'),
distal_distance=config.param('homer_annotate_peaks', 'distal_distance', type='int'),
distance5d_lower=config.param('homer_annotate_peaks', 'distance5d_lower', type='int'),
distance5d_upper=config.param('homer_annotate_peaks', 'distance5d_upper', type='int'),
gene_desert_size=config.param('homer_annotate_peaks', 'gene_desert_size', type='int')
),
removable_files=[os.path.join("annotation", contrast.real_name)]
)
], name="homer_annotate_peaks." + contrast.real_name))
else:
log.warning("No treatment found for contrast " + contrast.name + "... skipping")
report_file = os.path.join("report", "ChipSeq.homer_annotate_peaks.md")
jobs.append(
Job(
[os.path.join("annotation", contrast.real_name, contrast.real_name + ".annotated.csv") for contrast in self.contrasts],
[report_file],
command="""\
mkdir -p report/annotation/ && \\
cp {report_template_dir}/{basename_report_file} report/ && \\
for contrast in {contrasts}
do
rsync -avP annotation/$contrast report/annotation/ && \\
echo -e "* [Gene Annotations for Design $contrast](annotation/$contrast/${{contrast}}.annotated.csv)\n* [HOMER Gene Ontology Annotations for Design $contrast](annotation/$contrast/$contrast/geneOntology.html)\n* [HOMER Genome Ontology Annotations for Design $contrast](annotation/$contrast/$contrast/GenomeOntology.html)" \\
>> {report_file}
done""".format(
contrasts=" ".join([contrast.real_name for contrast in self.contrasts]),
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="homer_annotate_peaks_report")
)
return jobs
def homer_find_motifs_genome(self):
"""
De novo and known motif analysis per design are performed using HOMER.
"""
jobs = []
for contrast in self.contrasts:
# Don't find motifs for broad peaks
if contrast.type == 'narrow' and contrast.treatments:
peak_file = os.path.join("peak_call", contrast.real_name, contrast.real_name + "_peaks." + contrast.type + "Peak")
output_dir = os.path.join("annotation", contrast.real_name, contrast.real_name)
jobs.append(Job(
[peak_file],
[
os.path.join(output_dir, "homerResults.html"),
os.path.join(output_dir, "knownResults.html")
],
[
['homer_find_motifs_genome', 'module_perl'],
['homer_find_motifs_genome', 'module_weblogo'],
['homer_find_motifs_genome', 'module_homer']
],
command="""\
mkdir -p {output_dir} && \\
findMotifsGenome.pl \\
{peak_file} \\
{genome} \\
{output_dir} \\
-preparsedDir {output_dir}/preparsed \\
-p {threads}""".format(
peak_file=peak_file,
genome=config.param('homer_find_motifs_genome', 'assembly'),
output_dir=output_dir,
threads=config.param('homer_find_motifs_genome', 'threads', type='posint')
),
name="homer_find_motifs_genome." + contrast.real_name,
removable_files=[os.path.join("annotation", contrast.real_name)]
))
else:
log.warning("No treatment found for contrast " + contrast.name + "... skipping")
report_file = os.path.join("report", "ChipSeq.homer_find_motifs_genome.md")
jobs.append(
Job(
[os.path.join("annotation", contrast.real_name, contrast.real_name, "homerResults.html") for contrast in self.contrasts if contrast.type == 'narrow' and contrast.treatments] +
[os.path.join("annotation", contrast.real_name, contrast.real_name, "knownResults.html") for contrast in self.contrasts if contrast.type == 'narrow' and contrast.treatments],
[report_file],
command="""\
mkdir -p report/annotation/ && \\
cp {report_template_dir}/{basename_report_file} report/ && \\
for contrast in {contrasts}
do
rsync -avP annotation/$contrast report/annotation/ && \\
echo -e "* [HOMER _De Novo_ Motif Results for Design $contrast](annotation/$contrast/$contrast/homerResults.html)\n* [HOMER Known Motif Results for Design $contrast](annotation/$contrast/$contrast/knownResults.html)" \\
>> {report_file}
done""".format(
contrasts=" ".join([contrast.real_name for contrast in self.contrasts if contrast.type == 'narrow' and contrast.treatments]),
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="homer_find_motifs_genome_report")
)
return jobs
def annotation_graphs(self):
"""
Peak location statistics are generated per design: the proportion of peaks falling in each
genomic location category, where the categories are Gene (exon or intron), Proximal ([0;2] kb
upstream of a transcription start site), Distal ([2;10] kb upstream of a transcription start
site), 5d ([10;100] kb upstream of a transcription start site), Gene desert (>= 100 kb upstream
or downstream of a transcription start site) and Other (anything not included in the above
categories); the distribution of peaks found within exons and introns; the distribution of
peak distances relative to transcription start sites (TSS); and the location of peaks per design.
"""
# If --design <design_file> option is missing, self.contrasts call will raise an Exception
if self.contrasts:
design_file = os.path.relpath(self.args.design.name, self.output_dir)
input_files = []
output_files = []
for contrast in self.contrasts:
annotation_prefix = os.path.join("annotation", contrast.real_name, contrast.real_name)
input_files.append(annotation_prefix + ".tss.stats.csv")
input_files.append(annotation_prefix + ".exon.stats.csv")
input_files.append(annotation_prefix + ".intron.stats.csv")
input_files.append(annotation_prefix + ".tss.distance.csv")
output_files.append(os.path.join("graphs", contrast.real_name + "_Misc_Graphs.ps"))
peak_stats_file = os.path.join("annotation", "peak_stats.csv")
output_files.append(peak_stats_file)
report_file = os.path.join("report", "ChipSeq.annotation_graphs.md")
output_files.append(report_file)
return [Job(
input_files,
output_files,
[
['annotation_graphs', 'module_mugqic_tools'],
['annotation_graphs', 'module_R'],
['annotation_graphs', 'module_pandoc']
],
command="""\
mkdir -p graphs && \\
Rscript $R_TOOLS/chipSeqgenerateAnnotationGraphs.R \\
{design_file} \\
{output_dir} && \\
mkdir -p report/annotation/ && \\
if [[ -f {peak_stats_file} ]]
then
cp {peak_stats_file} report/annotation/
peak_stats_table=`LC_NUMERIC=en_CA awk -F "," '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----|-----:|-----:|-----:|-----:|-----:|-----:"}} else {{print $1, $2, sprintf("%\\47d", $3), $4, sprintf("%\\47.1f", $5), sprintf("%\\47.1f", $6), sprintf("%\\47.1f", $7), sprintf("%\\47.1f", $8)}}}}' {peak_stats_file}`
else
peak_stats_table=""
fi
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable peak_stats_table="$peak_stats_table" \\
--variable proximal_distance="{proximal_distance}" \\
--variable distal_distance="{distal_distance}" \\
--variable distance5d_lower="{distance5d_lower}" \\
--variable distance5d_upper="{distance5d_upper}" \\
--variable gene_desert_size="{gene_desert_size}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file} && \\
for contrast in {contrasts}
do
cp --parents graphs/${{contrast}}_Misc_Graphs.ps report/
convert -rotate 90 graphs/${{contrast}}_Misc_Graphs.ps report/graphs/${{contrast}}_Misc_Graphs.png
echo -e "----\n\n)](graphs/${{contrast}}_Misc_Graphs.png)\n" \\
>> {report_file}
done""".format(
design_file=design_file,
output_dir=self.output_dir,
peak_stats_file=peak_stats_file,
contrasts=" ".join([contrast.real_name for contrast in self.contrasts if contrast.type == 'narrow' and contrast.treatments]),
proximal_distance=config.param('homer_annotate_peaks', 'proximal_distance', type='int') / -1000,
distal_distance=config.param('homer_annotate_peaks', 'distal_distance', type='int') / -1000,
distance5d_lower=config.param('homer_annotate_peaks', 'distance5d_lower', type='int') / -1000,
distance5d_upper=config.param('homer_annotate_peaks', 'distance5d_upper', type='int') / -1000,
gene_desert_size=config.param('homer_annotate_peaks', 'gene_desert_size', type='int') / 1000,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
name="annotation_graphs",
report_files=[report_file],
removable_files=output_files
)]
@property
def steps(self):
return [
self.picard_sam_to_fastq,
self.trimmomatic,
self.merge_trimmomatic_stats,
self.bwa_mem_picard_sort_sam,
self.samtools_view_filter,
self.picard_merge_sam_files,
self.picard_mark_duplicates,
self.metrics,
self.homer_make_tag_directory,
self.qc_metrics,
self.homer_make_ucsc_file,
self.macs2_callpeak,
self.homer_annotate_peaks,
self.homer_find_motifs_genome,
self.annotation_graphs
]
if __name__ == '__main__':
ChipSeq()
|
ccmbioinfo/mugqic_pipelines
|
pipelines/chipseq/chipseq.py
|
Python
|
lgpl-3.0
| 34,986
|
[
"BWA"
] |
2aa7e664a96c7c2550af13a7f5ccdbdba44233b37519a6b2e2c6646df098b4f1
|
#!/usr/bin/env python
# multimeter-file.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
This file illustrates recording from an iaf_cond_alpha neuron
using a multimeter and writing data to a file.
'''
import nest
import numpy as np
import pylab as pl
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True, # set to True to permit overwriting
'data_path': '', # path to all data files, from working dir
'data_prefix': ''}) # prefix for all data files
# display recordables for illustration
print 'iaf_cond_alpha recordables: ', nest.GetDefaults('iaf_cond_alpha')['recordables']
# create neuron and multimeter
n = nest.Create('iaf_cond_alpha',
params = {'tau_syn_ex': 1.0, 'V_reset': -70.0})
m = nest.Create('multimeter',
params = {'withtime': True, # store time for each data point
'withgid': True, # store gid for each data point
'to_file': True, # write data to file
'label': 'my_multimeter', # part of file name
'interval': 0.1,
'record_from': ['V_m', 'g_ex', 'g_in']})
# Create spike generators and connect
gex = nest.Create('spike_generator',
params = {'spike_times': np.array([10.0, 20.0, 50.0])})
gin = nest.Create('spike_generator',
params = {'spike_times': np.array([15.0, 25.0, 55.0])})
nest.Connect(gex, n, params={'weight': 40.0}) # excitatory
nest.Connect(gin, n, params={'weight': -20.0}) # inhibitory
nest.Connect(m, n)
# simulate
nest.Simulate(100)
# obtain and display data
events = nest.GetStatus(m)[0]['events']
t = events['times'];
pl.clf()
pl.subplot(211)
pl.plot(t, events['V_m'])
pl.axis([0, 100, -75, -53])
pl.ylabel('Membrane potential [mV]')
pl.subplot(212)
pl.plot(t, events['g_ex'], t, events['g_in'])
pl.axis([0, 100, 0, 45])
pl.xlabel('Time [ms]')
pl.ylabel('Synaptic conductance [nS]')
pl.legend(('g_exc', 'g_inh'))
pl.show()
|
gewaltig/cython-neuron
|
pynest/examples/multimeter_file.py
|
Python
|
gpl-2.0
| 2,695
|
[
"NEURON"
] |
d6d705bf1a9b73ac5a80eca95edbe3bd9d0b0b483677a688348554eca1e0ee50
|
"""
==========================================
Symmetric Diffeomorphic Registration in 2D
==========================================
This example explains how to register 2D images using the Symmetric Normalization
(SyN) algorithm proposed by Avants et al. [Avants09]_ (also implemented in
the ANTS software [Avants11]_)
We will perform the classic Circle-To-C experiment for diffeomorphic registration
"""
import numpy as np
from dipy.data import get_data
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import SSDMetric, CCMetric, EMMetric
import dipy.align.imwarp as imwarp
from dipy.viz import regtools
fname_moving = get_data('reg_o')
fname_static = get_data('reg_c')
moving = np.load(fname_moving)
static = np.load(fname_static)
"""
To visually check the overlap of the static image with the transformed moving
image, we can plot them on top of each other with different channels to see
where the differences are located
"""
regtools.overlay_images(static, moving, 'Static', 'Overlay', 'Moving', 'input_images.png')
"""
.. figure:: input_images.png
:align: center
**Input images**.
"""
"""
We want to find an invertible map that transforms the moving image (circle)
into the static image (the C letter)
The first decision we need to make is what similarity metric is appropriate
for our problem. In this example we are using two binary images, so the Sum
of Squared Differences (SSD) is a good choice.
"""
dim = static.ndim
metric = SSDMetric(dim)
"""
Now we define an instance of the registration class. The SyN algorithm uses
a multi-resolution approach by building a Gaussian Pyramid. We instruct the
registration instance to perform at most [n_0, n_1, ..., n_k] iterations at
each level of the pyramid. The 0-th level corresponds to the finest resolution.
"""
level_iters = [200, 100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters, inv_iter = 50)
"""
Now we execute the optimization, which returns a DiffeomorphicMap object,
that can be used to register images back and forth between the static and moving
domains
"""
mapping = sdr.optimize(static, moving)
"""
It is a good idea to visualize the resulting deformation map to make sure the
result is reasonable (at least, visually)
"""
regtools.plot_2d_diffeomorphic_map(mapping, 10, 'diffeomorphic_map.png')
"""
.. figure:: diffeomorphic_map.png
:align: center
**Deformed lattice under the resulting diffeomorphic map**.
"""
"""
Now let's warp the moving image and see if it gets similar to the static image
"""
warped_moving = mapping.transform(moving, 'linear')
regtools.overlay_images(static, warped_moving, 'Static','Overlay','Warped moving',
'direct_warp_result.png')
"""
.. figure:: direct_warp_result.png
:align: center
**Moving image transformed under the (direct) transformation in green on top of the static image (in red)**.
"""
"""
And we can also apply the inverse mapping to verify that the warped static image
is similar to the moving image
"""
warped_static = mapping.transform_inverse(static, 'linear')
regtools.overlay_images(warped_static, moving,'Warped static','Overlay','Moving',
'inverse_warp_result.png')
"""
.. figure:: inverse_warp_result.png
:align: center
**Static image transformed under the (inverse) transformation in red on top of the moving image (in green)**.
"""
"""
Now let's register a couple of slices from a B0 image using the Cross
Correlation metric. Also, let's inspect the evolution of the registration.
To do this we will define a function that will be called by the registration
object at each stage of the optimization process. We will draw the current
warped images after finishing each resolution.
"""
def callback_CC(sdr, status):
#Status indicates at which stage of the optimization we currently are
#For now, we will only react at the end of each resolution of the scale
#space
if status == imwarp.RegistrationStages.SCALE_END:
#get the current images from the metric
wmoving = sdr.metric.moving_image
wstatic = sdr.metric.static_image
#draw the images on top of each other with different colors
regtools.overlay_images(wmoving, wstatic, 'Warped moving', 'Overlay', 'Warped static')
"""
Now we are ready to configure and run the registration. First load the data
"""
from dipy.data.fetcher import fetch_syn_data, read_syn_data
from dipy.segment.mask import median_otsu
fetch_syn_data()
t1, b0 = read_syn_data()
data = np.array(b0.get_data(), dtype = np.float64)
"""
We first remove the skull from the B0 volume
"""
b0_mask, mask = median_otsu(data, 4, 4)
"""
And select two slices to try the 2D registration
"""
static = b0_mask[:, :, 40]
moving = b0_mask[:, :, 38]
"""
After loading the data, we instantiate the Cross Correlation metric. The metric
receives three parameters: the dimension of the input images, the standard
deviation of the Gaussian Kernel to be used to regularize the gradient and the
radius of the window to be used for evaluating the local normalized cross
correlation.
"""
sigma_diff = 3.0
radius = 4
metric = CCMetric(2, sigma_diff, radius)
"""
Let's use a scale space of 3 levels
"""
level_iters = [100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)
sdr.callback = callback_CC
"""
And execute the optimization
"""
mapping = sdr.optimize(static, moving)
warped = mapping.transform(moving)
'''
We can see the effect of the warping by switching between the images before and
after registration
'''
regtools.overlay_images(static, moving, 'Static', 'Overlay', 'Moving',
't1_slices_input.png')
"""
.. figure:: t1_slices_input.png
:align: center
**Input images**.
"""
regtools.overlay_images(static, warped, 'Static', 'Overlay', 'Warped moving',
't1_slices_res.png')
"""
.. figure:: t1_slices_res.png
:align: center
**Moving image transformed under the (direct) transformation in green on top of the static image (in red)**.
"""
'''
And we can apply the inverse warping too
'''
inv_warped = mapping.transform_inverse(static)
regtools.overlay_images(inv_warped, moving, 'Warped static', 'Overlay', 'moving',
't1_slices_res2.png')
"""
.. figure:: t1_slices_res2.png
:align: center
**Static image transformed under the (inverse) transformation in red on top of the moving image (in green)**.
"""
'''
Finally, let's see the deformation
'''
regtools.plot_2d_diffeomorphic_map(mapping, 5, 'diffeomorphic_map_b0s.png')
"""
.. figure:: diffeomorphic_map_b0s.png
:align: center
**Deformed lattice under the resulting diffeomorphic map**.
.. [Avants09] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009). Symmetric Diffeomorphic Image Registration with Cross-Correlation: Evaluating Automated Labeling of Elderly and Neurodegenerative Brain, 12(1), 26-41.
.. [Avants11] Avants, B. B., Tustison, N., & Song, G. (2011). Advanced Normalization Tools ( ANTS ), 1-35.
.. include:: ../links_names.inc
"""
|
StongeEtienne/dipy
|
doc/examples/syn_registration_2d.py
|
Python
|
bsd-3-clause
| 7,049
|
[
"Gaussian"
] |
0bd45ddf9aeaf5b9cd83930be2f4e18d6d40df533075b323ff919b3c55179909
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import string
from neuron import h
import numbers
# a helper library, included with NEURON
h.load_file('stdlib.hoc')
h.load_file('import3d.hoc')
class Cell:
def __init__(self,name='neuron',soma=None,apic=None,dend=None,axon=None):
self.name = name  # store the name so __str__ below can return it
self.soma = soma if soma is not None else []
self.apic = apic if apic is not None else []
self.dend = dend if dend is not None else []
self.axon = axon if axon is not None else []
self.all = self.soma + self.apic + self.dend + self.axon
def delete(self):
self.soma = None
self.apic = None
self.dend = None
self.axon = None
self.all = None
def __str__(self):
return self.name
def load(filename, fileformat=None, cell=None, use_axon=True, xshift=0, yshift=0, zshift=0):
"""
Load an SWC from filename and instantiate inside cell. Code kindly provided
by @ramcdougal.
Args:
filename = path to the .swc (or .asc) file containing the morphology
fileformat = 'swc' or 'asc'; inferred from the filename extension if None
cell = Cell() object. (Default: None, creates new object)
use_axon = include the axon? Default: True (yes)
xshift, yshift, zshift = use to position the cell
Returns:
Cell() object with populated soma, axon, dend, & apic fields
Minimal example:
# pull the morphology for the demo from NeuroMorpho.Org
from PyNeuronToolbox import neuromorphoorg, load
with open('c91662.swc', 'w') as f:
f.write(neuromorphoorg.morphology('c91662'))
cell = load(filename)
"""
if cell is None:
cell = Cell(name=string.join(filename.split('.')[:-1]))
if fileformat is None:
fileformat = filename.split('.')[-1]
name_form = {1: 'soma[%d]', 2: 'axon[%d]', 3: 'dend[%d]', 4: 'apic[%d]'}
# load the data. Use Import3d_SWC_read for swc, Import3d_Neurolucida3 for
# Neurolucida V3, Import3d_MorphML for MorphML (level 1 of NeuroML), or
# Import3d_Eutectic_read for Eutectic.
if fileformat == 'swc':
morph = h.Import3d_SWC_read()
elif fileformat == 'asc':
morph = h.Import3d_Neurolucida3()
else:
raise Exception('file format `%s` not recognized'%(fileformat))
morph.input(filename)
# easiest to instantiate by passing the loaded morphology to the Import3d_GUI
# tool; with a second argument of 0, it won't display the GUI, but it will allow
# use of the GUI's features
i3d = h.Import3d_GUI(morph, 0)
# get a list of the swc section objects
swc_secs = i3d.swc.sections
swc_secs = [swc_secs.object(i) for i in xrange(int(swc_secs.count()))]
# initialize the lists of sections
sec_list = {1: cell.soma, 2: cell.axon, 3: cell.dend, 4: cell.apic}
# name and create the sections
real_secs = {}
for swc_sec in swc_secs:
cell_part = int(swc_sec.type)
# skip everything else if it's an axon and we're not supposed to
# use it... or if is_subsidiary
if (not(use_axon) and cell_part == 2) or swc_sec.is_subsidiary:
continue
# figure out the name of the new section
if cell_part not in name_form:
raise Exception('unsupported point type')
name = name_form[cell_part] % len(sec_list[cell_part])
# create the section
sec = h.Section(name=name, cell=cell)
# connect to parent, if any
if swc_sec.parentsec is not None:
sec.connect(real_secs[swc_sec.parentsec.hname()](swc_sec.parentx))
# define shape
if swc_sec.first == 1:
h.pt3dstyle(1, swc_sec.raw.getval(0, 0), swc_sec.raw.getval(1, 0),
swc_sec.raw.getval(2, 0), sec=sec)
j = swc_sec.first
xx, yy, zz = [swc_sec.raw.getrow(i).c(j) for i in xrange(3)]
dd = swc_sec.d.c(j)
if swc_sec.iscontour_:
# never happens in SWC files, but can happen in other formats supported
# by NEURON's Import3D GUI
raise Exception('Unsupported section style: contour')
if dd.size() == 1:
# single point soma; treat as sphere
x, y, z, d = [dim.x[0] for dim in [xx, yy, zz, dd]]
for xprime in [x - d / 2., x, x + d / 2.]:
h.pt3dadd(xprime + xshift, y + yshift, z + zshift, d, sec=sec)
else:
for x, y, z, d in zip(xx, yy, zz, dd):
h.pt3dadd(x + xshift, y + yshift, z + zshift, d, sec=sec)
# store the section in the appropriate list in the cell and lookup table
sec_list[cell_part].append(sec)
real_secs[swc_sec.hname()] = sec
cell.all = cell.soma + cell.apic + cell.dend + cell.axon
return cell
def sequential_spherical(xyz):
"""
Converts sequence of cartesian coordinates into a sequence of
line segments defined by spherical coordinates.
Args:
xyz = 2d numpy array, each row specifies a point in
cartesian coordinates (x,y,z) tracing out a
path in 3D space.
Returns:
r = lengths of each line segment (1D array)
theta = angles of line segments in XY plane (1D array)
phi = angles of line segments down from Z axis (1D array)
"""
d_xyz = np.diff(xyz,axis=0)
r = np.linalg.norm(d_xyz,axis=1)
theta = np.arctan2(d_xyz[:,1], d_xyz[:,0])
hyp = d_xyz[:,0]**2 + d_xyz[:,1]**2
phi = np.arctan2(np.sqrt(hyp), d_xyz[:,2])
return (r,theta,phi)
def spherical_to_cartesian(r,theta,phi):
"""
Simple conversion of spherical to cartesian coordinates
Args:
r,theta,phi = scalar spherical coordinates
Returns:
x,y,z = scalar cartesian coordinates
"""
x = r * np.sin(phi) * np.cos(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(phi)
return (x,y,z)
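# Hedged usage sketch for the two coordinate helpers above: convert a short 3-point path to
# per-segment spherical coordinates and map the first segment back. The function name
# _example_spherical_roundtrip is illustrative only (not part of the original toolbox API)
# and is never called at import time.
def _example_spherical_roundtrip():
    xyz = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0]])
    r, theta, phi = sequential_spherical(xyz)        # one (r, theta, phi) per line segment
    dx, dy, dz = spherical_to_cartesian(r[0], theta[0], phi[0])
    # (dx, dy, dz) recovers the first displacement, i.e. approximately (1, 0, 0)
    return (dx, dy, dz)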
def find_coord(targ_length,xyz,rcum,theta,phi):
"""
Find (x,y,z) ending coordinate of segment path along section
path.
Args:
targ_length = scalar specifying length of segment path, starting
from the begining of the section path
xyz = coordinates specifying the section path
rcum = cumulative sum of section path length at each node in xyz
theta, phi = angles between each coordinate in xyz
"""
# [1] Find spherical coordinates for the line segment containing
# the endpoint.
# [2] Find endpoint in spherical coords and convert to cartesian
i = np.nonzero(rcum <= targ_length)[0][-1]
if i == len(theta):
return xyz[-1,:]
else:
r_lcl = targ_length-rcum[i] # remaining length along line segment
(dx,dy,dz) = spherical_to_cartesian(r_lcl,theta[i],phi[i])
return xyz[i,:] + [dx,dy,dz]
def interpolate_jagged(xyz,nseg):
"""
Interpolates along a jagged path in 3D
Args:
xyz = section path specified in cartesian coordinates
nseg = number of segment paths in section path
Returns:
interp_xyz = interpolated path
"""
# Spherical coordinates specifying the angles of all line
# segments that make up the section path
(r,theta,phi) = sequential_spherical(xyz)
# cumulative length of section path at each coordinate
rcum = np.append(0,np.cumsum(r))
# breakpoints for segment paths along section path
breakpoints = np.linspace(0,rcum[-1],nseg+1)
# note: np.delete returns a copy, so the original call "np.delete(breakpoints,0)" was a
# no-op; all nseg+1 breakpoints are kept and indexed as [a] / [a+1] below
# Find segment paths
seg_paths = []
for a in range(nseg):
path = []
# find (x,y,z) starting coordinate of path
if a == 0:
start_coord = xyz[0,:]
else:
start_coord = end_coord # start at end of last path
path.append(start_coord)
# find all coordinates between the start and end points
start_length = breakpoints[a]
end_length = breakpoints[a+1]
mid_boolean = (rcum > start_length) & (rcum < end_length)
mid_indices = np.nonzero(mid_boolean)[0]
for mi in mid_indices:
path.append(xyz[mi,:])
# find (x,y,z) ending coordinate of path
end_coord = find_coord(end_length,xyz,rcum,theta,phi)
path.append(end_coord)
# Append path to list of segment paths
seg_paths.append(np.array(path))
# Return all segment paths
return seg_paths
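# Hedged usage sketch for interpolate_jagged: split a 4-point jagged path into nseg=2 segment
# paths. _example_interpolate_jagged is illustrative only (not part of the original toolbox
# API) and is never called at import time.
def _example_interpolate_jagged():
    xyz = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0],
                    [1.0, 1.0, 1.0]])
    seg_paths = interpolate_jagged(xyz, 2)   # list of 2 arrays of (x, y, z) points
    return seg_paths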
def get_section_path(h,sec):
n3d = int(h.n3d(sec=sec))
xyz = []
for i in range(0,n3d):
xyz.append([h.x3d(i,sec=sec),h.y3d(i,sec=sec),h.z3d(i,sec=sec)])
xyz = np.array(xyz)
return xyz
def shapeplot(h,ax,sections=None,order='pre',cvals=None,\
clim=None,cmap=cm.YlOrBr_r,**kwargs):
"""
Plots a 3D shapeplot
Args:
h = hocObject to interface with neuron
ax = matplotlib axis for plotting
sections = list of h.Section() objects to be plotted
order = { None= use h.allsec() to get sections
'pre'= pre-order traversal of morphology }
cvals = list/array with values mapped to color by cmap; useful
for displaying voltage, calcium or some other state
variable across the shapeplot.
**kwargs passes on to matplotlib (e.g. color='r' for red lines)
Returns:
lines = list of line objects making up shapeplot
"""
# Default is to plot all sections.
if sections is None:
if order == 'pre':
sections = allsec_preorder(h) # Get sections in "pre-order"
else:
sections = list(h.allsec())
# Determine color limits
if cvals is not None and clim is None:
cn = [ isinstance(cv, numbers.Number) for cv in cvals ]
if any(cn):
clim = [np.min(cvals[cn]), np.max(cvals[cn])]
# Plot each segement as a line
lines = []
i = 0
for sec in sections:
xyz = get_section_path(h,sec)
seg_paths = interpolate_jagged(xyz,sec.nseg)
for (j,path) in enumerate(seg_paths):
line, = plt.plot(path[:,0], path[:,1], path[:,2], '-k',**kwargs)
if cvals is not None:
if isinstance(cvals[i], numbers.Number):
# map number to colormap
col = cmap(int((cvals[i]-clim[0])*255/(clim[1]-clim[0])))
else:
# use input directly. E.g. if user specified color with a string.
col = cvals[i]
line.set_color(col)
lines.append(line)
i += 1
return lines
def shapeplot_animate(v,lines,nframes=None,tscale='linear',\
clim=[-80,50],cmap=cm.YlOrBr_r):
""" Returns animate function which updates color of shapeplot """
if nframes is None:
nframes = v.shape[0]
if tscale == 'linear':
def animate(i):
i_t = int((i/nframes)*v.shape[0])
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
elif tscale == 'log':
def animate(i):
i_t = int(np.round((v.shape[0] ** (1.0/(nframes-1))) ** i - 1))
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
else:
raise ValueError("Unrecognized option '%s' for tscale" % tscale)
return animate
def mark_locations(h,section,locs,markspec='or',**kwargs):
"""
Marks one or more locations along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers
"""
# get list of cartesian coordinates specifying section path
xyz = get_section_path(h,section)
(r,theta,phi) = sequential_spherical(xyz)
rcum = np.append(0,np.cumsum(r))
# convert locs into lengths from the beginning of the path
if type(locs) is float or type(locs) is np.float64:
locs = np.array([locs])
if type(locs) is list:
locs = np.array(locs)
lengths = locs*rcum[-1]
# find cartesian coordinates for markers
xyz_marks = []
for targ_length in lengths:
xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))
xyz_marks = np.array(xyz_marks)
# plot markers
line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \
xyz_marks[:,2], markspec, **kwargs)
return line
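# Hedged usage sketch (requires an instantiated NEURON morphology, so shown as a comment only):
#   line = mark_locations(h, cell.dend[0], [0.25, 0.5, 0.75], 'or')
# marks points a quarter, half and three quarters of the way along cell.dend[0];
# `cell` is a hypothetical Cell object returned by load() above.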
def root_sections(h):
"""
Returns a list of all sections that have no parent.
"""
roots = h.SectionList()
roots.allroots()
return list(roots)
def leaf_sections(h):
"""
Returns a list of all sections that have no children.
"""
return [sec for sec in h.allsec() if not sec.children()]
def root_indices(sec_list):
"""
Returns the index of all sections without a parent.
"""
return [i for i, sec in enumerate(sec_list) if sec.parentseg() is None]
def allsec_preorder(h):
"""
Alternative to using h.allsec(). This returns all sections in order from
the root. Traverses the topology of each neuron in "pre-order"
"""
#Iterate over all sections, find roots
roots = root_sections(h)
# Build list of all sections
sec_list = []
for r in roots:
add_pre(h,sec_list,r)
return sec_list
def add_pre(h,sec_list,section,order_list=None,branch_order=None):
"""
A helper function that traverses a neuron's morphology (or a sub-tree)
of the morphology in pre-order. This is usually not necessary for the
user to import.
"""
sec_list.append(section)
sref = h.SectionRef(sec=section)
if branch_order is not None:
order_list.append(branch_order)
if len(sref.child) > 1:
branch_order += 1
for next_node in sref.child:
add_pre(h,sec_list,next_node,order_list,branch_order)
def dist_between(h,seg1,seg2):
"""
Calculates the distance between two segments. Adapted from
a post by Michael Hines on the NEURON forum
(www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114)
Note: In NEURON 7.7+, you can just do return h.distance(seg1, seg2)
"""
h.distance(0, seg1)
return h.distance(seg2)
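# Hedged usage sketch (needs a live NEURON model, so shown as a comment only):
#   d = dist_between(h, cell.soma[0](0.5), cell.dend[0](1.0))
# returns the path distance between the middle of the soma and the distal end of the first
# dendritic section; `cell` is a hypothetical loaded Cell object.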
def all_branch_orders(h):
"""
Produces a list of branch orders for each section (following pre-order tree
traversal)
"""
#Iterate over all sections, find roots
roots = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(section)
# Build list of all sections
order_list = []
for r in roots:
add_pre(h,[],r,order_list,0)
return order_list
def branch_order(h,section, path=[]):
"""
Returns the branch order of a section
"""
path.append(section)
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
return 0 # section is a root
else:
nchild = len(list(h.SectionRef(sec=sref.parent).child))
if nchild <= 1.1:
return branch_order(h,sref.parent,path)
else:
return 1+branch_order(h,sref.parent,path)
def dist_to_mark(h, section, secdict, path=[]):
path.append(section)
sref = h.SectionRef(sec=section)
# print 'current : '+str(section)
# print 'parent : '+str(sref.parent)
if secdict[sref.parent] is None:
# print '-> go to parent'
s = section.L + dist_to_mark(h, sref.parent, secdict, path)
# print 'summing, '+str(s)
return s
else:
# print 'end <- start summing: '+str(section.L)
return section.L # parent is marked
def branch_precedence(h):
roots = root_sections(h)
leaves = leaf_sections(h)
seclist = allsec_preorder(h)
secdict = { sec:None for sec in seclist }
for r in roots:
secdict[r] = 0
precedence = 1
while len(leaves)>0:
# build list of distances of all paths to remaining leaves
d = []
for leaf in leaves:
p = []
dist = dist_to_mark(h, leaf, secdict, path=p)
d.append((dist,[pp for pp in p]))
# longest path index
i = np.argmax([ dd[0] for dd in d ])
leaves.pop(i) # this leaf will be marked
# mark all sections in longest path
for sec in d[i][1]:
if secdict[sec] is None:
secdict[sec] = precedence
# increment precedence across iterations
precedence += 1
#prec = secdict.values()
#return [0 if p is None else 1 for p in prec], d[i][1]
return [ secdict[sec] for sec in seclist ]
from neuron import h
from neuron.rxd.morphology import parent, parent_loc
import json
def morphology_to_dict(sections, outfile=None):
section_map = {sec: i for i, sec in enumerate(sections)}
result = []
h.define_shape()
for sec in sections:
my_parent = parent(sec)
my_parent_loc = -1 if my_parent is None else parent_loc(sec, my_parent)
my_parent = -1 if my_parent is None else section_map[my_parent]
n3d = int(h.n3d(sec=sec))
result.append({
'section_orientation': h.section_orientation(sec=sec),
'parent': my_parent,
'parent_loc': my_parent_loc,
'x': [h.x3d(i, sec=sec) for i in xrange(n3d)],
'y': [h.y3d(i, sec=sec) for i in xrange(n3d)],
'z': [h.z3d(i, sec=sec) for i in xrange(n3d)],
'diam': [h.diam3d(i, sec=sec) for i in xrange(n3d)],
'name': sec.hname()
})
if outfile is not None:
with open(outfile, 'w') as f:
json.dump(result, f)
return result
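# Hedged usage sketch (requires sections to exist in NEURON, so shown as a comment only):
#   secs = allsec_preorder(h)
#   morph = morphology_to_dict(secs, outfile='morphology.json')
# 'morphology.json' is a hypothetical output path; the JSON written here is the format that
# load_json() below expects to read back.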
def load_json(morphfile):
with open(morphfile, 'r') as f:
secdata = json.load(f)  # read from the open file handle, not the path string
seclist = []
for sd in secdata:
# make section
sec = h.Section(name=sd['name'])
seclist.append(sec)
# make 3d morphology
for x,y,z,d in zip(sd['x'], sd['y'], sd['z'], sd['diam']):
h.pt3dadd(x, y, z, d, sec=sec)
# connect children to parent compartments
for sec,sd in zip(seclist,secdata):
if sd['parent_loc'] >= 0:
parent_sec = seclist[sd['parent']]
sec.connect(parent_sec(sd['parent_loc']), sd['section_orientation'])
return seclist
|
ahwillia/PyNeuron-Toolbox
|
PyNeuronToolbox/morphology.py
|
Python
|
mit
| 18,827
|
[
"NEURON"
] |
12c1ed2e80852774686ae6e0b8e5ac8453e36173ef6d9c55c2e8138e4b552e03
|
from pylab import figure,scatter,contour,show,legend,connect
from numpy import array, append, arange, reshape, empty, exp
from modshogun import Gaussian, GMM
from modshogun import RealFeatures
import util
util.set_title('EM for 2d GMM example')
#set the parameters
min_cov=1e-9
max_iter=1000
min_change=1e-9
cov_type=0
#setup the real GMM
real_gmm=GMM(2)
real_gmm.set_nth_mean(array([1.0, 1.0]), 0)
real_gmm.set_nth_mean(array([-1.0, -1.0]), 1)
real_gmm.set_nth_cov(array([[1.0, 0.2],[0.2, 0.1]]), 0)
real_gmm.set_nth_cov(array([[0.3, 0.1],[0.1, 1.0]]), 1)
real_gmm.set_coef(array([0.3, 0.7]))
#generate training set from real GMM
generated=array([real_gmm.sample()])
for i in range(199):
generated=append(generated, array([real_gmm.sample()]), axis=0)
generated=generated.transpose()
feat_train=RealFeatures(generated)
#train GMM using EM
est_gmm=GMM(2, cov_type)
est_gmm.train(feat_train)
est_gmm.train_em(min_cov, max_iter, min_change)
#get and print estimated means and covariances
est_mean1=est_gmm.get_nth_mean(0)
est_mean2=est_gmm.get_nth_mean(1)
est_cov1=est_gmm.get_nth_cov(0)
est_cov2=est_gmm.get_nth_cov(1)
est_coef=est_gmm.get_coef()
print est_mean1
print est_cov1
print est_mean2
print est_cov2
print est_coef
#plot real GMM, data and estimated GMM
min_x_gen=min(min(generated[[0]]))-0.1
max_x_gen=max(max(generated[[0]]))+0.1
min_y_gen=min(min(generated[[1]]))-0.1
max_y_gen=max(max(generated[[1]]))+0.1
plot_real=empty(0)
plot_est=empty(0)
for i in arange(min_x_gen, max_x_gen, 0.05):
for j in arange(min_y_gen, max_y_gen, 0.05):
plot_real=append(plot_real, array([exp(real_gmm.cluster(array([i, j]))[2])]))
plot_est=append(plot_est, array([exp(est_gmm.cluster(array([i, j]))[2])]))
plot_real=reshape(plot_real, (arange(min_x_gen, max_x_gen, 0.05).shape[0], arange(min_y_gen, max_y_gen, 0.05).shape[0]))
plot_est=reshape(plot_est, (arange(min_x_gen, max_x_gen, 0.05).shape[0], arange(min_y_gen, max_y_gen, 0.05).shape[0]))
real_plot=contour(arange(min_x_gen, max_x_gen, 0.05), arange(min_y_gen, max_y_gen, 0.05), plot_real.transpose(), colors="b")
est_plot=contour(arange(min_x_gen, max_x_gen, 0.05), arange(min_y_gen, max_y_gen, 0.05), plot_est.transpose(), colors="r")
real_scatter=scatter(generated[[0]], generated[[1]], c="gray")
legend((real_plot.collections[0], est_plot.collections[0]), ("Real GMM", "Estimated GMM"))
connect('key_press_event', util.quit)
show()
|
AzamYahya/shogun
|
examples/undocumented/python_modular/graphical/em_2d_gmm.py
|
Python
|
gpl-3.0
| 2,425
|
[
"Gaussian"
] |
7ddde398afcde55d0a6ccb2e30c70acc01dc5cb59b4cb4ce8919e1c80dbfee7f
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.abspath('lib'))
from ansible import __version__, __author__
try:
from setuptools import setup, find_packages
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).")
sys.exit(1)
setup(name='ansible',
version=__version__,
description='Radically simple IT automation',
author=__author__,
author_email='support@ansible.com',
url='http://ansible.com/',
license='GPLv3',
install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six'],
package_dir={ '': 'lib' },
packages=find_packages('lib'),
package_data={
'': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1', 'galaxy/data/*'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
'bin/ansible-pull',
'bin/ansible-doc',
'bin/ansible-galaxy',
'bin/ansible-vault',
],
data_files=[],
)
|
attakei/ansible
|
setup.py
|
Python
|
gpl-3.0
| 1,864
|
[
"Galaxy"
] |
b4cba7ac75d0565816750d28c2f7a12193e2cde154710432f43bef36d71cbb96
|
# read statistics from ASCII files and put them in netCDF
# Chiel van Heerwaarden, 2012 -- created
# Cedrick.Ansorge@gmail.com, 2013 -- Some generalizations
#
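# Hedged usage sketch (the path, file prefix and jmax below are placeholders, not values used
# by this script):
#   avg, groups = avg2dict('avg', './stats', jmax=96, ftype='.gz', reynolds=1)
#   dict2nc(avg, 'avg_all', groups=groups)
# reads all ./stats/avg<iteration>.gz statistics files into a dictionary of profiles and time
# series and writes them to avg_all.nc.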
import gzip as gz
import netCDF4
import subprocess
import array as arr
import os
from pylab import *
def addGroup(gDict,gLine,ig):
garr=gLine.split()
if size(garr) > 3:
gName=garr[2]
gDict[gName] = garr[3:]
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def avg2dict(avgtype,avgpath,jmax,ftype='',tstart=-1, tend=-1,tstep=-1,reynolds=1):
print('TYPE: ', avgtype)
print('PATH: ', avgpath)
print('jmax: ', jmax)
print('fTYPE:', ftype)
ftype_count = len(ftype)
###########################################################
# get times automatically from files in directory
###########################################################
if ( tstart == -1 ) :
command = "find " + avgpath + ' -name \"' + avgtype + "[0-9]*\""+ftype
# print(command)
p=subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE)
flist = []
ordered_list = []
for file in p.stdout.readlines():
dummy = file.strip()
try:
with open(dummy):
cstart=len('{}/{}'.format(avgpath,avgtype))
cend=len(dummy)-ftype_count #strip off .gz
if ( is_number(dummy[cstart:cend]) ):
iter = int(dummy[cstart:cend])
ordered_list.append([iter,file.strip()])
except IOError:
print('ERROR - File', file, 'does not exist' )
flist = [f[1] for f in sorted(ordered_list,key=lambda file:file[0]) ]
retval = p.wait()
ntimes = len(flist)
else :
ntimes = (tend - tstart) / tstep + 1
print('FILES for', avgtype,':', ntimes )
if ftype != '.nc' and ntimes > 0 :
print(' -- OLD (ASCII) VERSION --\n first: {}\n last: {}'.format(flist[0],flist[-1]))
d,vgd=avg2dict_asc(flist,ntimes,avgtype,avgpath,jmax,ftype,tstart, tend,tstep,reynolds)
return d,vgd
elif ftype == '.nc' and ntimes > 0 :
print(' -- NEW (NETCDF) VERSION --\n first: {}\n last: {}'.format(flist[0],flist[-1]))
mergeNC(avgtype,avgpath,'nc_all.nc')
return
def mergeNC(avgtype,avgpath,ofile,reynolds=1):
command = 'ncrcat --4 -L 9 {}/{}*.nc {}.nc'.format(avgpath,avgtype,ofile)
print(command)
os.system(command)
# recalculate friction values for backward compatibility
f_h=netCDF4.Dataset('{}.nc'.format(ofile),'r+')
t=f_h['t']
du=f_h['U_y1'][:,1]
dw=f_h['W_y1'][:,1]
vF=f_h.createVariable('FrictionVelocity','f8',('t',))
vF.setncattr('Group','Friction')
vA=f_h.createVariable('FrictionAngle','f8',('t',))
vA.setncattr('Group','Friction')
nu=2./(reynolds*reynolds)
vF[:] = np.sqrt(nu*np.sqrt(du**2+dw**2))
vA[:] = np.arctan2(dw,du)
return
def avg2dict_asc(file_list,ntimes,avgtype,avgpath,jmax,ftype,tstart=-1, tend=-1,tstep=-1,reynolds=1):
ftype_count = len(ftype)
############################################################
if ( ntimes == 0 ) :
return -1
ig=0
hdrStr_old=''
update_vList=True
updateGroups=True
t_start=tstart
for t in range(ntimes):
if t_start == -1 :
filename = file_list[t]
#number starts after <path>/<file type>
cstart=len('{}/{}'.format(avgpath,avgtype))
cend=len(filename)-ftype_count #strip off .gz
filenum = filename[cstart:cend]
tend = filenum
if (t == 0):
tstart = filenum
else:
filenum = tstart+t*tstep
filename = '{}/{}{}{}'.format(avgpath,avgtype,filenum,ftype)
# process the file
if ftype == '.gz':
f = gz.open(filename,'r')
else:
f= open(filename,'r')
# retrieve the time
# print(filename)
datastring = f.readline().split()
if datastring[0] == 'RTIME':
time = datastring[2]
elif datastring[0].decode('UTF-8') == 'RTIME' :
time= float(datastring[2])
else:
print("ERROR getting time from file for type {}, it {}".format(avgtype,filenum))
print(datastring)
quit()
# process the group items in the header
datastring = f.readline()
d0=datastring.split()[0]
if type(d0) is bytes :
d0=str(d0.decode('UTF-8') )
if filenum == tstart:
varGroups={}
groupVars={}
while d0 == 'GROUP' or d0 == 'IMAX' or d0 == 'JMAX':
if d0 == 'GROUP'and updateGroups:
addGroup(varGroups,datastring,ig)
ig=ig+1
datastring = f.readline()
d0=datastring.split()[0]
if type(d0) is bytes:
d0=d0.decode('UTF-8')
hdrStr=datastring
hdr=datastring.split()
data = f.readline().split()
if hdrStr != hdrStr_old: # if there is a first or new header, we need to update
hdrTotal=size(hdr)
hdrProf=size(data)
hdrBulk=hdrTotal-hdrProf
hdrStr_old = hdrStr
update_vList = True
print('New HEADER for file {}\n ... {} VARIABLES total ( {} profiles + {} bulk)'.format(filename,hdrTotal,hdrProf,hdrBulk))
else:
update_vList = False
for n in range(hdrTotal * ( 1 if updateGroups else 0 )):
v=hdr[n]
for g in varGroups.keys():
if v in varGroups[g]:
groupVars[v]=g
break
updateGroups=False
if(filenum == tstart):
#initialize netCDF file
avg = {}
avg['Time'] = zeros(ntimes)
avg['Iteration'] = zeros(ntimes)
for n in range(hdrTotal):
v=hdr[n]
if n < hdrProf:
avg[v] = zeros((ntimes, jmax))
else:
avg[v] = zeros(ntimes)
elif update_vList:
# variables are initialized, but the header has changed => need to check whether we have to define something new
for n in range(hdrProf):
v=hdr[n]
if not v in avg:
print(' ... new variable {} at it {} ({})'.format(v,filenum,t))
if n < hdrProf:
avg[v] = zeros((ntimes,jmax))
else:
avg[v] = zeros(ntimes)
# if there are new variables, we will parse the group header next time to retrieve group information
updateGroups=True
# process the data
# first store the time
avg['Time'][t] = time
avg['Iteration'][t] = filenum
for i in range(jmax):
if i>0:
data = f.readline().split()
if ( len(data) != hdrProf and len(data) != hdrTotal ) :
print("ERROR: length of line ({}) neither matches expected number of profiles ({})".format(len(data),hdrProf))
print(" nor expected number of total variables ({})".format(hdrTotal))
# process the vertical profiles
for n in range(hdrProf):
avg[hdr[n]][t,i] = data[n]
# process the time series
if(len(data) == hdrTotal):
for n in range(hdrProf, hdrTotal):
avg[hdr[n]][t] = data[n]
# calculate friction velocity and angle for backward compatibility
if ( ( 'FrictionVelocity' in avg and not 'FrictionVelocity' in hdr ) or
( 'FrictionAngle' in avg and not 'FrictionAngle' in hdr ) ):
dudy = avg['U_y1'][t,0]
dwdy = avg['W_y1'][t,0]
us = np.sqrt(np.sqrt(dudy**2+dwdy**2)/(reynolds*reynolds/2))
al = np.arctan2(dwdy,dudy)*180/np.pi if us>1e-12 else 0.
print('Calculating u* and alpha:',us,al,np.average(avg['FrictionVelocity'][:t]),np.average(avg['FrictionAngle'][:t] ) )
avg['FrictionVelocity'][t] = us
avg['FrictionAngle'][t] = al
f.close()
return avg,groupVars
#############################################################
def dict2nc(dict, ncname, flag=0,groups=-1):
# process the dictionaries to netcdf files
avgnc = netCDF4.Dataset("{}.nc".format(ncname), "w")
# retrieve dimensions
time = dict["Time"]
ntimes = time.shape[0]
y = dict["Y"]
jmax = y.shape[1]
print("Creating netCDF file with ntimes = {} and jmax = {}".format(ntimes, jmax))
# create dimensions in netCDF file
dim_y = avgnc.createDimension('y', jmax)
dim_t = avgnc.createDimension('t', ntimes)
# create variables
var_t = avgnc.createVariable('t', 'f8',('t',))
var_t.units='seconds'
var_y = avgnc.createVariable('y', 'f8',('y',))
var_y.long_name='Height above Surface'
var_y.positive='up'
var_y.standard_name='height'
var_y.units='level'
var_it= avgnc.createVariable('it','i4',('t',))
# store the data
# first, handle the dimensions
var_t[:] = dict['Time'][:]
var_y[:] = dict['Y'] [0,:]
var_it[:] = [int(f) for f in dict['Iteration'][:]]
# now make a loop through all vars in dict.
for varname in dict.keys():
if(not( (varname == "Iteration") or (varname == "Y") or \
(varname == "Time") or (varname == "I") or (varname == "J") ) ):
vardata = dict[varname]
if(len(vardata.shape) == 2):
if( (vardata.shape[0] == ntimes) and (vardata.shape[1] == jmax) ):
#print("Storing {} in 2D (t,y) array".format(varname))
var_name = avgnc.createVariable(varname,'f8',('t','y',))
var_name[:,:] = vardata
if(len(vardata.shape) == 1):
if(vardata.shape[0] == ntimes):
#print("Storing {} in 1D (t) array".format(varname))
var_name = avgnc.createVariable(varname,'f8',('t',))
var_name[:] = vardata
if groups != -1:
for v in groups.keys():
avgnc[v].setncattr('Group',groups[v])
# close the file
avgnc.close()
|
turbulencia/tlab
|
scripts/python/stats2nc.py
|
Python
|
gpl-3.0
| 10,004
|
[
"NetCDF"
] |
505ed66996e05a1153439bc248fdc75f3aab8385d32b351a54a10634d38d0dc8
|
from . import core, utils
import cgt
import ctypes, os.path as osp, hashlib, numpy as np, sys, subprocess, string, os, time, traceback, cPickle
from collections import defaultdict, namedtuple
from StringIO import StringIO
import logging
def function(inputs, outputs, dbg=None, updates=None, givens=None):
assert isinstance(inputs, list), "Inputs must be a list"
assert all(el.is_argument() for el in inputs), "Invalid input: should be a list of Argument nodes"
if isinstance(outputs, list):
assert all(isinstance(el, core.Node) for el in outputs), "Invalid output: should all be symbolic variables"
return _function_listout(inputs, outputs, dbg, updates, givens)
elif isinstance(outputs, core.Node):
f_listout = _function_listout(inputs, [outputs], dbg, updates, givens)
return lambda *args : f_listout(*args)[0]
else:
raise ValueError("Expected `outputs` to be a Node or a list of Nodes. Got an object of type %s"%type(outputs))
def _function_listout(inputs, outputs, dbg = None, updates=None, givens=None):
if isinstance(updates,dict): updates=updates.items()
if updates is None: updates = []
else: assert (isinstance(updates, list) and
all(isinstance(a,tuple) and len(a)==2
and isinstance(a[0], core.Node) and isinstance(a[1], core.Node)
for a in updates)), "updates should be a list of pairs (before, after)"
if givens is None: givens = []
else: assert all(before.is_data() for (before,_) in updates), "lhs of updates must be Data instances"
if dbg: raise core.Todo("debug functionality is broken")
outputs = [cgt.make_tuple(*x) if isinstance(x, tuple) else x for x in outputs]
interp = run_compilation_pipeline(inputs, outputs, updates, givens)
return interp
# ================================================================
# Execution
# ================================================================
def python_only():
return not hasattr(cgt,"cycgt")
def determine_devices(nodes_sorted, updatetarg2src):
# Op definitions (available impls, inplace-ness, etc) define constraints
# on possible devices for a node
if python_only():
return {node:core.Device() for node in nodes_sorted}
# (1) Get available devices for nodes, determined by which impls are available and node types
compile_info = get_compile_info()
cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
node2dev = {}
home_device = core.Device(devtype="cpu", idx=0)
for node in nodes_sorted:
default_device = node.props.get("default_device", home_device)
if node.is_scalar():
device = home_device
elif node in updatetarg2src:
device = node2dev[updatetarg2src[node]]
assert "native_"+device.devtype in node.op.available_impls, "XXX bug: update only works if final operation can be performed on target device"
elif node.is_data():
device = node.op.device
elif node.is_argument():
device = home_device
else:
if ("native_gpu" in node.op.available_impls) and ((default_device.devtype == "gpu") or ("native_cpu" not in node.op.available_impls)):
assert cuda_enabled, "trying to put op on gpu but cuda is disabled"
device = core.Device("gpu", default_device.idx)
else:
device = core.Device(devtype="cpu", idx=default_device.idx)
node2dev[node] = device
return node2dev
def is_tensor(x):
return isinstance(x.typ, core.TensorType)
def is_tuple(x):
return isinstance(x.typ, core.TupleType)
def create_interpreter(inputs, outputs, eg, node2memloc):
assert isinstance(eg, ExecutionGraph)
input_types = [input.typ for input in inputs] #pylint: disable=W0622
output_locs = [node2memloc[node] for node in outputs]
config = cgt.get_config()
backend = config["backend"]
parallel = config["parallel"]
if backend == "python":
if parallel:
raise NotImplementedError("For parallel=True, set backend=native")
# return ParallelInterpreter(eg, output_locs, input_types)
else:
return SequentialInterpreter(eg, output_locs, input_types)
elif backend == "native":
if parallel:
return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, config["num_threads"])
else:
return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, 0)
else:
raise NotImplementedError("invalid backend %s"%backend)
def topsorted_shapes_first(outputs, node2shape):
# Almost identical to topsorted(...) function
# But we also need to visit the shape elements of an in-place node
# before visiting that node
marks = {}
out = []
stack = []
for x in outputs:
stack.append((x,0))
while stack:
(i,jidx) = stack.pop()
if jidx == 0:
m = marks.get(i,0)
if m == 0:
marks[i] = 1
elif m == 1:
raise ValueError("not a dag")
else:
continue
ps = i.parents
###### Changed part ######
if i.ndim > 0 and not i.is_input() and i.op.return_type=="byref":
if i in node2shape:
shpels = node2shape[i]
else:
raise core.Unreachable
# shpels = i.op.shp_apply(i.parents)
ps = ps + shpels
elif is_tuple(i):
for arrshp in node2shape[i]:
ps = ps + arrshp
##########################
if jidx == len(ps):
marks[i] = 2
out.append(i)
else:
stack.append((i,jidx+1))
j = ps[jidx]
stack.append((j,0))
return out
def determine_memowner(nodes_sorted, updates, node2dev):
# First determine how many "child" nodes each node has
node2child = defaultdict(list)
for node in nodes_sorted:
for parent in node.parents:
node2child[parent].append(node)
# Now traverse graph again and see where we can use the same memory
node2memowner = {} # mapping node x -> the node that owns its memory
# For updates, memlocation(RHS) = memlocation(LHS)
after2before = {after:before for (before,after) in updates}
enable_inplace_opt = core.get_config()["enable_inplace_opt"]
for node in nodes_sorted:
base = node # by default,
if node.is_argument():
pass
elif node.op.writes_to_input >= 0:
base = node2memowner[node.parents[node.op.writes_to_input]]
elif node in after2before:
base = after2before[node]
elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
nodeshape = node.op.shp_apply(node.parents)
for parent in node.parents:
if (len(node2child[parent])==1
and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
and node.dtype == parent.dtype
and _is_data_mutable(parent)):
base = parent
break
# TODO: add optimization for in-place incrementing
node2memowner[node] = base
return node2memowner
class MemCounter(object):
"""
returns `MemLocation`s with indices 0,1,...
`count` member indicates how many have been returned thus far
"""
def __init__(self):
self.count=0
def new_memloc(self, devtype):
out = MemLocation(self.count, devtype)
self.count += 1
return out
def create_execution_graph(inputs, nodes_sorted, node2shape, node2memowner, node2dev):
# node2impltype = copy.copy(node2impltype) # we'll insert transport ops
instrs = []
counter = MemCounter()
node2memloc = {}
for node in nodes_sorted:
if node not in node2dev: node2dev[node] = core.Device(devtype="cpu",idx=node2dev[node.parents[0]].idx if len(node.parents)>0 else 0)
if node.is_argument():
write_loc = counter.new_memloc(node2dev[node].devtype)
node2memloc[node] = write_loc
i = inputs.index(node)
instrs.append(LoadArgument(i, write_loc))
else:
read_locs = [node2memloc[parent] for parent in node.parents]
if node.op.return_type == "byref":
if node2memowner[node] is node:
if is_tensor(node): # just make one memory location for output
nodeshape = node2shape[node] if node.ndim > 0 else []
shape_locs = [node2memloc[shpel] for shpel in nodeshape]
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(Alloc(node.dtype, shape_locs, write_loc))
else: # if it's a tuple, we need to allocate all of the components, then build tuple
nodeshape = node2shape[node]
assert isinstance(nodeshape, tuple)
arr_locs = []
for (arrshp, arrtyp) in utils.safezip(nodeshape, node.typ):
arr_loc = counter.new_memloc(node2dev[node].devtype)
shape_locs = [node2memloc[shpel] for shpel in arrshp]
instrs.append(Alloc(arrtyp.dtype, shape_locs, arr_loc))
arr_locs.append(arr_loc)
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(BuildTup(node.typ, arr_locs, write_loc))
else:
# If this node writes to another node's memory, the devices must be the same
# this should have been enforced in determine_devices()
assert node2dev[node] == node2dev[node2memowner[node]]
write_loc = node2memloc[node2memowner[node]]
instrs.append(ReturnByRef(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
else:
assert node.op.return_type == "byval"
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(ReturnByVal(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
node2memloc[node] = write_loc
return ExecutionGraph(instrs, len(inputs), counter.count), node2memloc
def get_callable(op, input_types, devtype, prefer_python=False):
assert op.available_impls, "need to set op.available_impls"
config = core.get_config()
if (prefer_python or config["force_python_impl"]) and "python" in op.available_impls:
return op.get_py_callable(input_types)
elif config["backend"] == "python":
if "python" in op.available_impls:
return op.get_py_callable(input_types)
else:
assert devtype=="cpu", "can't use devtype=gpu with python backend"
if "native_cpu" in op.available_impls:
return get_native_callable(op, input_types, "cpu")
else:
raise RuntimeError("Can't find an implementation of %s suitable for python backend. Just have available_impls=%s"%(op,op.available_impls))
else: # backend = native
if devtype == "cpu":
if "native_cpu" in op.available_impls:
return get_native_callable(op, input_types, "cpu")
else:
print "using python impl for",op
return op.get_py_callable(input_types)
else:
if "native_gpu" in op.available_impls:
return get_native_callable(op, input_types, "gpu")
else:
raise RuntimeError("Tried to put Op %s on the GPU but I only have a python impl :("%op)
def get_native_callable(op, input_types, devtype):
nci = op.get_native_compile_info(input_types, devtype)
nci.op_str = str(op)
nci.return_type = op.return_type
nci.n_in = len(input_types)
return nci2callable(nci)
def add_transports(nodelist, node2dev, node2shape):
node2child = defaultdict(list)
for node in nodelist:
for par in node.parents:
node2child[par].append(node)
# XXX look at native compilation info, gpu deref mask
for node in nodelist:
dev = node2dev[node]
dev2copy = {}
for child in node2child[node]:
childdev = node2dev[child]
if not childdev == dev:
if childdev not in dev2copy:
nodecopy = core.Result(core.Transport(childdev), [node])
node2dev[nodecopy] = childdev
dev2copy[childdev] = nodecopy
node2shape[nodecopy] = node2shape[node]
replace_parents(child, node, dev2copy[childdev])
def replace_parents(node, before, after):
for (i,p) in enumerate(node.parents):
if p is before:
node.parents[i] = after
def run_compilation_pipeline(inputs, outputs, updates, givens):
"""
Compiles the expression graph into an execution graph.
"""
config = core.get_config()
# Phase 1: simplification and analysis of expression graph
# ------------------------------------------------------
# Add add update targets to outputs
outputs_updatetargs = outputs + [after for (_before, after) in updates]
if givens: outputs_updatetargs = core.clone(outputs_updatetargs, dict(givens))
# Do simplification + analysis pass on expression graph
outputs_updatetargs_simple, analysis, _ = \
core.simplify_and_analyze(outputs_updatetargs) if config["enable_simplification"] \
else (outputs_updatetargs, core.analyze(outputs_updatetargs), {})
# Phase 2: device targeting
# ------------------------------------------------------
outputs_updatetargs_simple = cgt.core.clone(outputs_updatetargs_simple)
analysis = core.analyze(outputs_updatetargs_simple)
# XXX inefficient to just copy the graph and redo analysis
nodelist = core.topsorted(outputs_updatetargs_simple)
updatesrcs = [before for (before, _) in updates]
updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
node2dev = determine_devices(nodelist, {targ:src for (src,targ) in zip(updatesrcs, updatetargs_simple)})
add_transports(nodelist, node2dev, analysis["node2shape"])
# XXX we're missing stuff used for shape computation
# XXX i think we might also have unnecessary stuff from shape comp in exe graph
# Phase 3: build execution graph
# ------------------------------------------------------
# Sort nodes so that shape elements appear before a given node
nodes_sorted = topsorted_shapes_first(outputs_updatetargs_simple, analysis["node2shape"]) # XXX don't need shapes for byval ops
# For each node, figure out if its output should be written to a previous node's memory
# (memowner : "memory owner")
updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
node2memowner = determine_memowner(nodes_sorted, zip(updatesrcs, updatetargs_simple), node2dev)
# Find the outputs we want to return
    outputs_simple = outputs_updatetargs_simple[:len(outputs)] # drop the update targets
# Generate execution graph
eg, node2memloc = create_execution_graph(
inputs, nodes_sorted, analysis["node2shape"], node2memowner, node2dev)
# print execution graph
if config["verbose"]:
print 'begin'
print '\n'.join(str(i)+'.) \t'+repr(instr) for (i,instr) in enumerate(eg.instrs))
print 'end'
    # Phase 4: create C or Python interpreter for graph
# ------------------------------------------------------
interp = create_interpreter(inputs, outputs_simple, eg, node2memloc)
# Done!
return interp
# ================================================================
# Simple numeric eval via traversal
# ================================================================
def numeric_eval(output, arg2val):
"""
Numerically evaluates symbolic variable without any compilation,
by associating each argument with a value (via `arg2val`) and traversing the
computation graph
Inputs
------
output: symbolic variable or list of variables we would like to evaluate
arg2val: dictionary assigning each argument that output depends on to a numerical value
Returns
-------
Numeric value or list of numeric values of variables corresponding to output
"""
if isinstance(output, list):
assert all(isinstance(x, core.Node) for x in output), "expected a list of Nodes"
return _numeric_eval_listout(output, arg2val)
elif isinstance(output, core.Node):
return _numeric_eval_listout([output], arg2val)[0]
else:
raise ValueError("expected `output` to be a Node or a list of Nodes. Got an object of type %s"%type(output))
def _numeric_eval_listout(outputs, arg2val):
"""
Evaluate outputs numerically. arg2val is a dictionary mapping arguments to numerical values
"""
assert isinstance(outputs, list)
assert isinstance(arg2val, dict)
nodes = list(core.topsorted(outputs))
node2val = {}
for node in nodes:
if node.is_argument():
node2val[node] = core.as_valid_array(arg2val[node])
elif node.is_data():
node2val[node] = node.op.get_value()
else:
parentvals = [node2val[par] for par in node.parents]
node2val[node] = core.py_numeric_apply(node, parentvals)
# assert node.get_ndim() == np.array(node2val[node]).ndim
numeric_outputs = [node2val[node] for node in outputs]
return numeric_outputs
################################################################
### Execution graph
################################################################
MemInfo = namedtuple("MemInfo",["loc","access"])
MEM_OVERWRITE = "overwrite"
MEM_INCREMENT = "increment"
class ExecutionGraph(object):
def __init__(self, instrs, n_args, n_locs):
self.instrs = instrs
self.n_args = n_args
self.n_locs = n_locs
class MemLocation(object):
def __init__(self, idx, devtype):
assert isinstance(idx, int) and devtype in ["cpu", "gpu"]
self.index = idx
self.devtype = devtype
# TODO: dtype
def __repr__(self):
return "%%%i/%s" % (self.index, self.devtype)
# ================================================================
# Instructions
# ================================================================
class Instr(object):
def fire(self, interp):
raise NotImplementedError
class LoadArgument(Instr):
def __init__(self, ind, write_loc):
self.ind = ind
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, interp.getarg(self.ind))
def __repr__(self):
return "%s = LoadArg ind:%i" % (self.write_loc, self.ind)
class Alloc(Instr):
def __init__(self, dtype, read_locs, write_loc):
self.dtype = dtype
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
shp = tuple(interp.get(mem) for mem in self.read_locs)
prevarr = interp.get(self.write_loc)
if prevarr is None or prevarr.shape != shp:
interp.set(self.write_loc, np.ones(shp, self.dtype))
def __repr__(self):
return "%s = Alloc shp:%s dtype:%s" % (self.write_loc, str(self.read_locs), self.dtype)
class BuildTup(Instr):
def __init__(self, typ, read_locs, write_loc):
self.typ = typ
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, tuple(interp.get(loc) for loc in self.read_locs))
def __repr__(self):
return "%s = BuildTup args:%s" % (self.write_loc, str(self.read_locs))
class ReturnByRef(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
self._callable.call(
[interp.get(mem) for mem in self.read_locs],
interp.get(self.write_loc))
def __repr__(self):
return "%s = ReturnByRef op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
class ReturnByVal(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
interp.set(self.write_loc, self._callable.call([interp.get(mem) for mem in self.read_locs]))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
def __repr__(self):
return "%s = ReturnByVal op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
# ================================================================
# Compiling native code
# ================================================================
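# Overview: nci2callable turns an op's NativeCompileInfo into a callable in
# four steps -- render the templated C++/CUDA source, hash it together with the
# compiler flags to obtain a cache key, compile a shared library into
# CACHE_ROOT on a cache miss, and wrap the loaded function pointer (plus the
# optional setup/teardown functions and ctypes closure) in a NativeCallable.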
def nci2callable(nci):
template_code = gen_templated_code(nci.includes, nci.closure_triples, nci.func_code)
compile_info = get_compile_info()
prefix = utils.hash_seq1(template_code, compile_info["CPP_FLAGS"], *(src.code for src in nci.extra_srcs))
d = dict(function=_funcname(prefix), closure=_closurename(prefix),setup=_setupname(prefix),teardown=_teardownname(prefix))
fn_srcfile = core.SrcFile("c++",string.Template(template_code).substitute(d))
srcfiles = [fn_srcfile]
srcfiles.extend(core.SrcFile(sf.lang, string.Template(sf.code).substitute(d)) for sf in nci.extra_srcs)
CACHE_ROOT = compile_info["CACHE_ROOT"]
libpath = osp.join(CACHE_ROOT, prefix+".so")
if not osp.exists(libpath):
tu = TranslationUnit(srcfiles, nci.link_flags)
tu.compile(prefix, libpath)
lib = get_or_load_lib(libpath)
fptr = getattr(lib, _funcname(prefix))
setup_fptr = getattr(lib, _setupname(prefix)) if nci.setup else None
teardown_fptr = getattr(lib, _teardownname(prefix)) if nci.teardown else None
cldata = _build_closure(nci.closure_triples)
return core.NativeCallable(nci.n_in, nci.return_type, nci.op_str, fptr, cldata=cldata, setup_fptr=setup_fptr, teardown_fptr=teardown_fptr,
store_objects=nci.store_objects)
def _funcname(prefix):
return "call_"+prefix
def _setupname(prefix):
return "setup_"+prefix
def _teardownname(prefix):
return "teardown_"+prefix
def _closurename(prefix):
return "closure_"+prefix
def gen_templated_code(includes, closure_info, func_code):
s = StringIO()
includes = ["cgt_common.h"] + includes
for fname in includes:
s.write('#include "%s"\n'%fname)
gen_struct_code(closure_info, s)
s.write(func_code)
return s.getvalue()
def gen_struct_code(triples, outstream):
if triples is None:
return
outstream.write("typedef struct $closure {\n")
for (fieldname,fieldtype,_val) in triples:
outstream.write(_ctypes2str[fieldtype])
outstream.write(" ")
outstream.write(fieldname)
outstream.write(";\n")
outstream.write("} $closure;\n")
_LIBRARIES = {}
def get_or_load_lib(libname):
if libname in _LIBRARIES:
return _LIBRARIES[libname]
else:
out = ctypes.cdll.LoadLibrary(libname)
_LIBRARIES[libname] = out
return out
class TranslationUnit(object):
"""All the input that goes into building a native binary for one or more ops"""
def __init__(self, srcfiles, link_flags):
self.srcfiles = srcfiles
self.link_flags = link_flags
def compile(self, prefix, libpath):
"""
Compiles all of the files, places them in the cache directory
Then links them creating prefix.so
"""
CACHE_ROOT = get_compile_info()["CACHE_ROOT"]
cmds = ["cd %s"%CACHE_ROOT]
objs = []
for (i,(lang,code)) in enumerate(self.srcfiles):
if lang=="c++":
srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cpp"%i)
cmds.append(_make_cpp_compile_cmd(srcpath))
elif lang=="cuda":
srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cu"%i)
cmds.append(_make_cuda_compile_cmd(srcpath))
else:
raise NotImplementedError
with open(srcpath,"w") as fh: fh.write(code)
objs.append(srcpath+".o")
cmds.append(_make_link_cmd(objs, self.link_flags, libpath))
bigcmd = " && ".join(cmds)
call_and_print(bigcmd)
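# The assembled shell command has roughly this shape (illustrative names):
#   cd $CACHE_ROOT && c++ $CPP_FLAGS prefix_0.cpp -c -o prefix_0.cpp.o $INCLUDES \
#       && c++ $CPP_FLAGS prefix_0.cpp.o $LINK_FLAGS -o prefix.so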
_COMPILE_CONFIG = None
def get_compile_info():
global _COMPILE_CONFIG
if _COMPILE_CONFIG is None:
config = core.get_config()
CGT_BUILD_ROOT = cgt.cycgt.cgt_build_root() #pylint: disable=E1101
cmake_info = {}
with open(osp.join(CGT_BUILD_ROOT,"build_info.txt")) as fh:
lines = fh.readlines()
for line in lines:
if ":=" not in line: print "skipping",line
lhs,rhs = line.split(":=")
lhs = lhs.strip()
rhs = rhs.strip()
cmake_info[lhs] = rhs
CUDA_ROOT = cmake_info["CUDA_ROOT"]
CGT_ENABLE_CUDA = cmake_info["CGT_ENABLE_CUDA"] in ["1","ON"]
CGT_ENABLE_CUDNN = cmake_info["CGT_ENABLE_CUDNN"] in ["1","ON"]
DEFINITIONS = "-DENABLE_CUDA" if CGT_ENABLE_CUDA else ""
CUDNN_ROOT = cmake_info["CUDNN_ROOT"]
cuda_library_dir = osp.join(CUDA_ROOT,"lib64") if osp.exists(osp.join(CUDA_ROOT,"lib64")) else osp.join(CUDA_ROOT,"lib")
_COMPILE_CONFIG = dict(
OPENBLAS_INCLUDE_DIR = osp.join(CGT_BUILD_ROOT,"OpenBLAS"),
CGT_INCLUDE_DIR = cmake_info["CGT_INCLUDE_DIR"],
CGT_LIBRARY_DIR = osp.join(CGT_BUILD_ROOT,"lib"),
CUDA_LIBRARY_DIR = cuda_library_dir,
CUDA_INCLUDE_DIR = osp.join(CUDA_ROOT,"include"),
CUDA_LIBRARIES = cmake_info["CUDA_LIBRARIES"],
DEFINITIONS = DEFINITIONS,
CUDA_ROOT = CUDA_ROOT,
CUDNN_ROOT = CUDNN_ROOT,
CACHE_ROOT = osp.expanduser(config["cache_dir"]),
CGT_ENABLE_CUDA = CGT_ENABLE_CUDA,
CGT_ENABLE_CUDNN = CGT_ENABLE_CUDNN,
# CGT_LIBRARY = cmake_info["CGT_LIBRARY"],
)
includes = "-I"+_COMPILE_CONFIG["CGT_INCLUDE_DIR"]
includes += " -I"+_COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"]
link_flags = ""
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: includes += " -I"+_COMPILE_CONFIG["CUDA_INCLUDE_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]: includes += " -I"+_COMPILE_CONFIG["CUDNN_ROOT"]
_COMPILE_CONFIG["INCLUDES"] = includes
link_flags = "-lcgt -L"+_COMPILE_CONFIG["CGT_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: link_flags += " -L"+_COMPILE_CONFIG["CUDA_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]:
link_flags += " -L"+_COMPILE_CONFIG["CUDNN_ROOT"]
link_flags += " -Wl,-rpath,"+_COMPILE_CONFIG["CUDNN_ROOT"]
if sys.platform == "darwin":
link_flags += " -dynamiclib -Wl,-headerpad_max_install_names"
else:
link_flags += " -shared -rdynamic"
_COMPILE_CONFIG["LINK_FLAGS"] = link_flags
cpp_flags = "-fvisibility=hidden -std=c++11 -fPIC" + (" -O0 -g" if config["debug_cpp"] else " -O3 -DNDEBUG")
if sys.platform == "darwin": cpp_flags += " -stdlib=libc++"
_COMPILE_CONFIG["CPP_FLAGS"] = cpp_flags
CACHE_ROOT = _COMPILE_CONFIG["CACHE_ROOT"]
if not osp.exists(CACHE_ROOT):
os.makedirs(CACHE_ROOT)
return _COMPILE_CONFIG
def _make_cpp_compile_cmd(srcpath):
d = get_compile_info()
return "c++ %(cpp_flags)s %(srcpath)s -c -o %(srcpath)s.o %(includes)s %(definitions)s"%dict(
srcpath = srcpath, includes=d["INCLUDES"], definitions=d["DEFINITIONS"],
cpp_flags=d["CPP_FLAGS"], cacheroot=d["CACHE_ROOT"])
def _make_cuda_compile_cmd(srcpath):
d = get_compile_info()
return "nvcc %(srcpath)s -c -o %(srcpath)s.o -ccbin cc -m64 -Xcompiler -fPIC -Xcompiler -O3 %(includes)s %(definitions)s"%dict(
srcpath = srcpath, includes=d["INCLUDES"], definitions=d["DEFINITIONS"])
def _make_link_cmd(objs, extra_link_flags, libpath):
d = get_compile_info()
iname = "-install_name %s"%osp.basename(libpath) if sys.platform=="darwin" else ""
return r"c++ %(cpp_flags)s %(objnames)s %(link_flags)s %(iname)s -o %(libpath)s"%dict(
objnames=" ".join(objs), includes=d["INCLUDES"], cpp_flags=d["CPP_FLAGS"], libpath=libpath,
link_flags=d["LINK_FLAGS"]+" "+extra_link_flags, cacheroot=d["CACHE_ROOT"], iname=iname)
def call_and_print(cmd):
print "\x1b[32m%s\x1b[0m"%cmd
subprocess.check_call(cmd,shell=True)
_ctypes2str = {
ctypes.c_byte : "uint8_t",
ctypes.c_bool : "bool",
ctypes.c_char : "char",
ctypes.c_int : "int",
ctypes.c_long : "long",
ctypes.c_void_p : "void*",
ctypes.c_double : "double",
ctypes.c_float : "float"
}
_struct_cache = {} # because creating ctypes.Structure class is slow for some reason
def _build_closure(triples):
if triples is None:
return ctypes.c_void_p(0)
vals = []
fields = []
for (fieldname,fieldtype,val) in triples:
vals.append(val)
fields.append((fieldname,fieldtype))
try:
key = cPickle.dumps(fields)
S = _struct_cache[key]
except KeyError:
class S(ctypes.Structure):
_fields_ = fields
_struct_cache[key] = S
closure = S(*vals)
return closure
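# Sketch: _build_closure([("n", ctypes.c_int, 5)]) returns a ctypes.Structure
# instance whose field n == 5, matching the struct emitted by gen_struct_code
# for the same triples; it is passed to the native function as closure data.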
################################################################
### Interpreters
################################################################
class Interpreter(object):
def __call__(self, args):
raise NotImplementedError
def get(self, mem):
raise NotImplementedError
def set(self, mem, val):
raise NotImplementedError
def getarg(self, i):
raise NotImplementedError
class SequentialInterpreter(Interpreter):
"""
Runs an execution graph
"""
def __init__(self, eg, output_locs, input_types, copy_outputs=True):
self.eg = eg
self.input_types = input_types
self.output_locs = output_locs
self.storage = [None for _ in xrange(self.eg.n_locs)]
self.args = None
self.copy_outputs = copy_outputs
def __call__(self, *args):
assert len(args) == len(self.input_types), "Wrong number of inputs provided"
self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
for instr in self.eg.instrs:
if profiler.on: tstart = time.time()
try:
instr.fire(self)
except Exception as e:
traceback.print_exc()
if isinstance(instr, (ReturnByRef,ReturnByVal)):
if core.get_config()["debug"]:
assert "stack" in instr.node_props
utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
traceback.print_list(instr.node_props["stack"])
print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
raise e
else:
utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
raise e
else:
utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
raise e
if profiler.on: profiler.update(instr, time.time()-tstart)
        outputs = [self.get(loc) for loc in self.output_locs]
        # Copy the outputs by default: otherwise the interpreter's internal
        # storage could be overwritten on the next call, corrupting arrays
        # that were already returned to the caller.
        # TODO: add an option that prevents this behavior
        if self.copy_outputs: outputs = map(_copy, outputs)
        return outputs
def get(self, mem):
return self.storage[mem.index]
def set(self, mem, val):
self.storage[mem.index] = val
def getarg(self, i):
return self.args[i]
# ================================================================
# Profiler
# ================================================================
class _Profiler(object):
"""
Profiler for Python backend, i.e. Interpreter
"""
def __init__(self):
self.instr2stats = {}
self.on = False
self.t_total = 0.0
def start(self): self.on = True
def stop(self): self.on = False
def update(self, instr, elapsed):
(prevcount, prevtime) = self.instr2stats.get(instr, (0,0.0))
self.instr2stats[instr] = (prevcount+1, prevtime+elapsed)
self.t_total += elapsed
def print_stats(self):
op2stats = {}
# Collapse by Op, rather than instruction
for (instr,(count,t)) in self.instr2stats.iteritems():
if isinstance(instr, (ReturnByRef, ReturnByVal)):
opkey = str(instr.op)
elif isinstance(instr, Alloc):
opkey = "Alloc{dtype=%s,ndim=%i}"%(instr.dtype, len(instr.read_locs))
else:
opkey = instr.__class__.__name__
(prevcount, prevtime) = op2stats.get(opkey, (0, 0.0))
op2stats[opkey] = (prevcount+count, prevtime+t)
print "Total time elapsed: %.3g seconds"%self.t_total
# _print_heading("By instruction")
# _print_stats(self.instr2stats, self.t_total)
_print_heading("By Op")
_print_stats(op2stats, self.t_total)
def clear_stats(self):
self.instr2stats = {}
self.t_total = 0.0
profiler = _Profiler()
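# Usage sketch (f is assumed to be a function compiled for the Python backend,
# i.e. one that executes through SequentialInterpreter):
#   profiler.start()
#   f(x)
#   profiler.stop()
#   profiler.print_stats()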
def _print_heading(heading):
heading = " " + heading + " "
width = 60
assert len(heading) < width-10
print
print "*"*width
padleft = (width-len(heading))//2
padright = width-len(heading)-padleft
print "*"*padleft + heading + "*"*padright
print "*"*width
def _print_stats(key2stats, t_total):
rows = []
for (key, (count,t)) in key2stats.iteritems():
rows.append([str(key), count, t, t/t_total])
rows = sorted(rows, key=lambda row: row[2], reverse=True)
cumsum = 0
for row in rows:
cumsum += row[3]
row.append(cumsum)
from thirdparty.tabulate import tabulate
print tabulate(rows, headers=["Instruction","Count","Time","Frac","Frac cumsum"])
def _copy(x):
if isinstance(x, np.ndarray): return x.copy()
elif isinstance(x, tuple): return tuple(el.copy() for el in x)
elif np.isscalar(x): return x # xxx is this case ok?
else: raise NotImplementedError
def typecheck_args(numargs, types):
assert len(numargs)==len(types), "wrong number of arguments. got %i, expected %i"%(len(numargs),len(types))
for (numarg,typ) in zip(numargs,types):
if isinstance(typ, core.TensorType):
assert numarg.dtype==typ.dtype and numarg.ndim==typ.ndim
# ================================================================
# Utils
# ================================================================
def _list_to_json(xs):
return [x.to_json() for x in xs]
def _is_data_mutable(node):
return not node.is_input() and not isinstance(node.op, core.Constant)
|
davmre/cgt
|
cgt/compilation.py
|
Python
|
mit
| 36,421
|
[
"VisIt"
] |
07f7da77f43b4250c7cc964af8a11cb4c31681135420fbf5d0532c8c19066ed0
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from espresso import Real3D, infinity
import espresso.unittest
from espresso.interaction.LennardJones import *
class TestLennardJones(espresso.unittest.TestCase):
def testDefaults(self):
lj=LennardJones()
self.assertEqual(lj.epsilon, 1.0)
self.assertEqual(lj.sigma, 1.0)
self.assertEqual(lj.cutoff, infinity)
self.assertEqual(lj.shift, 0.0)
def testEnergy(self):
lj=LennardJones(epsilon=2.0, sigma=2.0)
# root
self.assertAlmostEqual(lj.computeEnergy(2.0), 0.0)
self.assertAlmostEqual(lj.computeEnergy(2.0, 0.0, 0.0), 0.0)
# minimum
self.assertAlmostEqual(
lj.computeEnergy(2.0*2.0**(1.0/6.0)), -2.0)
self.assertAlmostEqual(lj.computeEnergy(0.0, 2.0*2.0**(1.0/6.0), 0.0), -2.0)
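        # These values follow from V(r) = 4*eps*((sigma/r)**12 - (sigma/r)**6):
        # with eps = sigma = 2.0 the root sits at r = sigma = 2.0 and the
        # minimum V = -eps = -2.0 at r = 2**(1.0/6.0)*sigma.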
def testForce(self):
lj=LennardJones(epsilon=2.0, sigma=2.0)
# force in the minimum
self.assertAlmostEqual(
(lj.computeForce(2.0*2.0**(1.0/6.0), 0.0, 0.0) -
Real3D(0.0, 0.0, 0.0)).sqr(), 0)
    def testProperties(self):
lj=LennardJones()
lj.epsilon=2.0
lj.sigma=2.0
lj.cutoff=4.0
lj.shift=0.0
# here we test energy computation, as testing property access
# would always work
self.assertAlmostEqual(lj.computeEnergy(2.0), 0.0)
self.assertAlmostEqual(lj.computeEnergy(2.0*2.0**(1.0/6.0)), -2.0)
if __name__ == "__main__":
unittest.main()
|
BackupTheBerlios/espressopp
|
src/interaction/unittest/PTestLennardJones.py
|
Python
|
gpl-3.0
| 2,362
|
[
"ESPResSo"
] |
e9033c84cfb14882f65be528885ef8219ee646cf462fa7c173640544b2404eca
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import docutils
import os
import re
# -- Project information -----------------------------------------------------
project = 'OpenROAD'
copyright = 'The Regents of the University of California, 2021'
author = 'OpenROAD Team'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx_external_toc',
'myst_parser',
]
myst_enable_extensions = [
'amsmath',
'colon_fence',
'deflist',
'dollarmath',
'html_admonition',
'html_image',
'replacements',
'smartquotes',
'substitution',
'tasklist',
]
external_toc_path = 'toc.yml'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.md']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'**/LICENSE',
'**/LICENSE.md',
'README.md',
'misc/NewToolDocExample.md',
'docs/releases/PostAlpha2.1BranchMethodology.md',
'main/src/odb/src/def/README.md',
'main/src/odb/src/def/doc/README.md',
'main/src/odb/src/lef/README.md',
'main/docs',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_symbiflow_theme"
html_theme_options = {
# Specify a list of menu in Header.
# Tuples forms:
# ('Name', 'external url or path of pages in the document', boolean, 'icon name')
#
# Third argument:
# True indicates an external link.
# False indicates path of pages in the document.
#
# Fourth argument:
# Specify the icon name.
# For details see link.
# https://material.io/icons/
'header_links': [
('Home', 'index', False, 'home'),
("The OpenROAD Project", "https://theopenroadproject.org", True, 'launch'),
("GitHub", "https://github.com/The-OpenROAD-Project/OpenROAD", True, 'link')
],
# Customize css colors.
# For details see link.
# https://getmdl.io/customize/index.html
#
# Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,
# light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)
'primary_color': 'indigo',
# Values: Same as primary_color. (Default: pink)
'accent_color': 'blue',
# Customize layout.
# For details see link.
# https://getmdl.io/components/index.html#layout-section
'fixed_drawer': True,
'fixed_header': True,
'header_waterfall': True,
'header_scroll': False,
# Render title in header.
# Values: True, False (Default: False)
'show_header_title': False,
# Render title in drawer.
# Values: True, False (Default: True)
'show_drawer_title': True,
# Render footer.
# Values: True, False (Default: True)
'show_footer': True,
# Hide the symbiflow links
'hide_symbiflow_links': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
def setup(app):
import os
if not os.path.exists('main'):
os.symlink('..', 'main')
prefix = '(../'
newPath = '(./main/'
with open('index.md', 'r') as f:
lines = f.read()
lines = lines.replace(prefix, newPath)
with open('index.md', 'wt') as f:
f.write(lines)
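    # For example, a link written as "(../src/README.md)" in index.md becomes
    # "(./main/src/README.md)" (illustrative path), which resolves through the
    # "main" symlink created above.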
|
The-OpenROAD-Project/OpenROAD
|
docs/conf.py
|
Python
|
bsd-3-clause
| 4,852
|
[
"Amber"
] |
2be1317051a9237454b07f1c7074c2350eaa505f078d985fec32a0f1fb483f48
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
A plugin to verify the data against user-adjusted tests.
This is the research tool, not the low-level data integrity check.
"""
from __future__ import division, print_function
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import os
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
try:
from hashlib import md5
except ImportError:
from md5 import md5
from gramps.gen.errors import WindowActiveError
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE, VERSION_DIR
from gramps.gen.constfunc import UNITYPE
from gramps.gen.lib import (ChildRefType, EventRoleType, EventType,
FamilyRelType, NameType, Person)
from gramps.gen.lib.date import Today
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gen.utils.db import family_name
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Verify_the_Data...')
#-------------------------------------------------------------------------
#
# temp storage and related functions
#
#-------------------------------------------------------------------------
_person_cache = {}
_family_cache = {}
_event_cache = {}
_today = Today().get_sort_value()
def find_event(db, handle):
if handle in _event_cache:
obj = _event_cache[handle]
else:
obj = db.get_event_from_handle(handle)
_event_cache[handle] = obj
return obj
def find_person(db, handle):
if handle in _person_cache:
obj = _person_cache[handle]
else:
obj = db.get_person_from_handle(handle)
_person_cache[handle] = obj
return obj
def find_family(db, handle):
if handle in _family_cache:
obj = _family_cache[handle]
else:
obj = db.get_family_from_handle(handle)
_family_cache[handle] = obj
return obj
def clear_cache():
_person_cache.clear()
_family_cache.clear()
_event_cache.clear()
#-------------------------------------------------------------------------
#
# helper functions
#
#-------------------------------------------------------------------------
def get_date_from_event_handle(db, event_handle, estimate=False):
if not event_handle:
return 0
event = find_event(db,event_handle)
if event:
date_obj = event.get_date_object()
if not estimate and \
(date_obj.get_day() == 0 or date_obj.get_month() == 0):
return 0
return date_obj.get_sort_value()
else:
return 0
def get_date_from_event_type(db, person, event_type, estimate=False):
if not person:
return 0
for event_ref in person.get_event_ref_list():
event = find_event(db,event_ref.ref)
if event:
if event_ref.get_role() != EventRoleType.PRIMARY and \
event.get_type() == EventType.BURIAL:
continue
if event.get_type() == event_type:
date_obj = event.get_date_object()
if not estimate and \
(date_obj.get_day() == 0 or date_obj.get_month() == 0):
return 0
return date_obj.get_sort_value()
return 0
def get_bapt_date(db, person, estimate=False):
return get_date_from_event_type(db, person,
EventType.BAPTISM, estimate)
def get_bury_date(db, person, estimate=False):
# check role on burial event
for event_ref in person.get_event_ref_list():
event = find_event(db, event_ref.ref)
if event and event.get_type() == EventType.BURIAL and \
event_ref.get_role() == EventRoleType.PRIMARY:
return get_date_from_event_type(db, person,
EventType.BURIAL, estimate)
def get_birth_date(db, person, estimate=False):
if not person:
return 0
birth_ref = person.get_birth_ref()
if not birth_ref:
ret = 0
else:
ret = get_date_from_event_handle(db,birth_ref.ref,estimate)
if estimate and (ret == 0):
ret = get_bapt_date(db,person,estimate)
return ret
def get_death_date(db, person, estimate=False):
if not person:
return 0
death_ref = person.get_death_ref()
if not death_ref:
ret = 0
else:
ret = get_date_from_event_handle(db,death_ref.ref,estimate)
if estimate and (ret == 0):
ret = get_bury_date(db,person,estimate)
return ret
def get_age_at_death(db, person, estimate):
birth_date = get_birth_date(db,person,estimate)
death_date = get_death_date(db,person,estimate)
if (birth_date > 0) and (death_date > 0):
return death_date - birth_date
return 0
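# The dates above are Gramps sort values, i.e. day counts, so the difference is
# an age in days; the rule classes below divide by 365 to express it in years.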
def get_father(db, family):
if not family:
return None
father_handle = family.get_father_handle()
if father_handle:
return find_person(db,father_handle)
return None
def get_mother(db, family):
if not family:
return None
mother_handle = family.get_mother_handle()
if mother_handle:
return find_person(db, mother_handle)
return None
def get_child_birth_dates(db, family, estimate):
dates = []
for child_ref in family.get_child_ref_list():
child = find_person(db,child_ref.ref)
child_birth_date = get_birth_date(db, child, estimate)
if child_birth_date > 0:
dates.append(child_birth_date)
return dates
def get_n_children(db, person):
n = 0
for family_handle in person.get_family_handle_list():
family = find_family(db,family_handle)
if family:
n += len(family.get_child_ref_list())
return n
def get_marriage_date(db, family):
if not family:
return 0
for event_ref in family.get_event_ref_list():
event = find_event(db,event_ref.ref)
if event.get_type() == EventType.MARRIAGE and \
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY ):
date_obj = event.get_date_object()
return date_obj.get_sort_value()
return 0
#-------------------------------------------------------------------------
#
# Actual tool
#
#-------------------------------------------------------------------------
class Verify(tool.Tool, ManagedWindow, UpdateCallback):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
self.label = _('Data Verify tool')
self.vr = None
tool.Tool.__init__(self, dbstate, options_class, name)
ManagedWindow.__init__(self, uistate,[], self.__class__)
if uistate:
UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
self.dbstate = dbstate
if uistate:
self.init_gui()
else:
self.add_results = self.add_results_cli
self.run_tool(cli=True)
def add_results_cli(self, results):
# print data for the user, no GUI
(msg,gramps_id, name, the_type, rule_id, severity, handle) = results
if severity == Rule.WARNING:
# translators: needed for Arabic, ignore otherwise
print(_("%(severity)s: %(msg)s, %(type)s: %(gid)s, %(name)s") %
{'severity':'W', 'msg':msg, 'type':the_type,
'gid':gramps_id, 'name':name})
elif severity == Rule.ERROR:
# translators: needed for Arabic, ignore otherwise
print(_("%(severity)s: %(msg)s, %(type)s: %(gid)s, %(name)s") %
{'severity':'E', 'msg':msg, 'type':the_type,
'gid':gramps_id, 'name':name})
else:
# translators: needed for Arabic, ignore otherwise
print(_("%(severity)s: %(msg)s, %(type)s: %(gid)s, %(name)s") %
{'severity':'S', 'msg':msg, 'type':the_type,
'gid':gramps_id, 'name':name})
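        # With the format above, a warning line prints as, e.g. (illustrative
        # data): W: Old age at death, Person: I0042, John Smith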
def init_gui(self):
# Draw dialog and make it handle everything
self.vr = None
self.top = Glade()
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_verify_ok_clicked" : self.on_apply_clicked,
"on_delete_event" : self.close,
})
window = self.top.toplevel
self.set_window(window,self.top.get_object('title'),self.label)
for option in self.options.handler.options_dict:
if option in ['estimate_age', 'invdate']:
self.top.get_object(option).set_active(
self.options.handler.options_dict[option]
)
else:
self.top.get_object(option).set_value(
self.options.handler.options_dict[option]
)
self.window.show()
def build_menu_names(self, obj):
return (_("Tool settings"),self.label)
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def on_apply_clicked(self, obj):
run_button = self.top.get_object('button4')
close_button = self.top.get_object('button5')
run_button.set_sensitive(False)
close_button.set_sensitive(False)
for option in self.options.handler.options_dict:
if option in ['estimate_age', 'invdate']:
self.options.handler.options_dict[option] = \
self.top.get_object(option).get_active()
else:
self.options.handler.options_dict[option] = \
self.top.get_object(option).get_value_as_int()
try:
self.vr = VerifyResults(self.dbstate, self.uistate, self.track)
self.add_results = self.vr.add_results
self.vr.load_ignored(self.db.full_name)
except WindowActiveError:
pass
self.uistate.set_busy_cursor(True)
self.uistate.progress.show()
self.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
try:
self.vr.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
except AttributeError:
pass
self.run_tool(cli=False)
self.uistate.progress.hide()
self.uistate.set_busy_cursor(False)
try:
self.window.get_window().set_cursor(None)
self.vr.window.get_window().set_cursor(None)
except AttributeError:
pass
run_button.set_sensitive(True)
close_button.set_sensitive(True)
self.reset()
# Save options
self.options.handler.save_options()
def run_tool(self,cli=False):
person_handles = self.db.iter_person_handles()
for option, value in \
self.options.handler.options_dict.items():
exec('%s = %s' % (option, value), globals())
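            # The exec above injects each option (oldage, estimate_age, invdate,
            # ...) as a module-level global so that the rule constructors below
            # can read the thresholds by name.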
if self.vr:
self.vr.real_model.clear()
self.set_total(self.db.get_number_of_people() +
self.db.get_number_of_families())
for person_handle in person_handles:
person = find_person(self.db, person_handle)
rule_list = [
BirthAfterBapt(self.db, person),
DeathBeforeBapt(self.db, person),
BirthAfterBury(self.db, person),
DeathAfterBury(self.db, person),
BirthAfterDeath(self.db, person),
BaptAfterBury(self.db, person),
OldAge(self.db, person, oldage, estimate_age),
OldAgeButNoDeath(self.db, person, oldage, estimate_age),
UnknownGender(self.db, person),
MultipleParents(self.db, person),
MarriedOften(self.db, person, wedder),
OldUnmarried(self.db, person, oldunm, estimate_age),
TooManyChildren(self.db, person, mxchilddad, mxchildmom),
Disconnected(self.db, person),
InvalidBirthDate(self.db, person, invdate),
InvalidDeathDate(self.db, person, invdate),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
# Family-based rules
for family_handle in self.db.iter_family_handles():
family = find_family(self.db,family_handle)
rule_list = [
SameSexFamily(self.db,family),
FemaleHusband(self.db,family),
MaleWife(self.db,family),
SameSurnameFamily(self.db,family),
LargeAgeGapFamily(self.db,family, hwdif,estimate_age),
MarriageBeforeBirth(self.db,family,estimate_age),
MarriageAfterDeath(self.db,family,estimate_age),
EarlyMarriage(self.db,family,yngmar,estimate_age),
LateMarriage(self.db,family, oldmar,estimate_age),
OldParent(self.db,family, oldmom, olddad,estimate_age),
YoungParent(self.db,family,yngmom,yngdad,estimate_age),
UnbornParent(self.db,family,estimate_age),
DeadParent(self.db,family,estimate_age),
LargeChildrenSpan(self.db,family,cbspan,estimate_age),
LargeChildrenAgeDiff(self.db,family,cspace,estimate_age),
MarriedRelation(self.db,family),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
#-------------------------------------------------------------------------
#
# Display the results
#
#-------------------------------------------------------------------------
class VerifyResults(ManagedWindow):
IGNORE_COL = 0
WARNING_COL = 1
OBJ_ID_COL = 2
OBJ_NAME_COL = 3
OBJ_TYPE_COL = 4
RULE_ID_COL = 5
OBJ_HANDLE_COL = 6
FG_COLOR_COL = 7
TRUE_COL = 8
SHOW_COL = 9
def __init__(self,dbstate,uistate,track):
self.title = _('Data Verification Results')
ManagedWindow.__init__(self,uistate,track,self.__class__)
self.dbstate = dbstate
self.top = Glade(toplevel="verify_result")
window = self.top.toplevel
self.set_window(window,self.top.get_object('title2'),self.title)
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_verify_ok_clicked" : self.__dummy,
"on_help_clicked" : self.__dummy,
})
self.warn_tree = self.top.get_object('warn_tree')
self.warn_tree.connect('button_press_event', self.double_click)
self.selection = self.warn_tree.get_selection()
self.hide_button = self.top.get_object('hide_button')
self.hide_button.connect('toggled',self.hide_toggled)
self.mark_button = self.top.get_object('mark_all')
self.mark_button.connect('clicked',self.mark_clicked)
self.unmark_button = self.top.get_object('unmark_all')
self.unmark_button.connect('clicked',self.unmark_clicked)
self.invert_button = self.top.get_object('invert_all')
self.invert_button.connect('clicked',self.invert_clicked)
self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING, object,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN)
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
self.renderer = Gtk.CellRendererText()
self.img_renderer = Gtk.CellRendererPixbuf()
self.bool_renderer = Gtk.CellRendererToggle()
self.bool_renderer.connect('toggled', self.selection_toggled)
# Add ignore column
ignore_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
active=VerifyResults.IGNORE_COL)
ignore_column.set_sort_column_id(VerifyResults.IGNORE_COL)
self.warn_tree.append_column(ignore_column)
# Add image column
img_column = Gtk.TreeViewColumn(None, self.img_renderer )
img_column.set_cell_data_func(self.img_renderer,self.get_image)
self.warn_tree.append_column(img_column)
# Add column with the warning text
warn_column = Gtk.TreeViewColumn(_('Warning'), self.renderer,
text=VerifyResults.WARNING_COL,
foreground=VerifyResults.FG_COLOR_COL)
warn_column.set_sort_column_id(VerifyResults.WARNING_COL)
self.warn_tree.append_column(warn_column)
# Add column with object gramps_id
id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
text=VerifyResults.OBJ_ID_COL,
foreground=VerifyResults.FG_COLOR_COL)
id_column.set_sort_column_id(VerifyResults.OBJ_ID_COL)
self.warn_tree.append_column(id_column)
# Add column with object name
name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
text=VerifyResults.OBJ_NAME_COL,
foreground=VerifyResults.FG_COLOR_COL)
name_column.set_sort_column_id(VerifyResults.OBJ_NAME_COL)
self.warn_tree.append_column(name_column)
self.window.show()
self.window_shown = False
def __dummy(self, obj):
"""dummy callback, needed because VerifyResults is in same glade file
as Verify, so callbacks of Verify must be defined.
"""
pass
def load_ignored(self, db_filename):
if sys.version_info[0] >= 3 and isinstance(db_filename, UNITYPE):
db_filename = db_filename.encode('utf-8')
md5sum = md5(db_filename)
## a new Gramps major version means recreating the .vfm file.
## User can copy over old one, with name of new one, but no guarantee
## that will work.
self.ignores_filename = os.path.join(
VERSION_DIR, md5sum.hexdigest() + os.path.extsep + 'vfm')
if not self._load_ignored(self.ignores_filename):
self.ignores = {}
def _load_ignored(self, filename):
try:
try:
f = open(filename, 'rb')
except IOError:
return False
self.ignores = pickle.load(f)
f.close()
return True
except (IOError, EOFError):
f.close()
return False
def save_ignored(self, new_ignores):
self.ignores = new_ignores
self._save_ignored(self.ignores_filename)
def _save_ignored(self,filename):
try:
f = open(filename,'wb')
pickle.dump(self.ignores, f, 1)
f.close()
return True
except IOError:
return False
def get_marking(self, handle,rule_id):
if handle in self.ignores:
return (rule_id in self.ignores[handle])
else:
return False
def get_new_marking(self):
new_ignores = {}
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
ignore = row[VerifyResults.IGNORE_COL]
if ignore:
handle = row[VerifyResults.OBJ_HANDLE_COL]
rule_id = row[VerifyResults.RULE_ID_COL]
if handle not in new_ignores:
new_ignores[handle] = set()
new_ignores[handle].add(rule_id)
return new_ignores
def close(self, *obj):
new_ignores = self.get_new_marking()
self.save_ignored(new_ignores)
ManagedWindow.close(self,*obj)
def hide_toggled(self, button):
if button.get_active():
button.set_label(_("_Show all"))
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.SHOW_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
else:
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
button.set_label(_("_Hide marked"))
def selection_toggled(self, cell, path_string):
sort_path = tuple(map(int, path_string.split(':')))
filt_path = self.sort_model.convert_path_to_child_path(Gtk.TreePath(sort_path))
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.IGNORE_COL]
self.real_model.row_changed(real_path,row.iter)
def mark_clicked(self, mark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = True
row[VerifyResults.SHOW_COL] = False
self.filt_model.refilter()
def unmark_clicked(self, unmark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = False
row[VerifyResults.SHOW_COL] = True
self.filt_model.refilter()
def invert_clicked(self, invert_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.SHOW_COL]
self.filt_model.refilter()
def double_click(self, obj, event):
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
(model, node) = self.selection.get_selected()
if not node:
return
sort_path = self.sort_model.get_path(node)
filt_path = self.sort_model.convert_path_to_child_path(sort_path)
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
the_type = row[VerifyResults.OBJ_TYPE_COL]
handle = row[VerifyResults.OBJ_HANDLE_COL]
if the_type == 'Person':
try:
person = self.dbstate.db.get_person_from_handle(handle)
EditPerson(self.dbstate, self.uistate, [], person)
except WindowActiveError:
pass
elif the_type == 'Family':
try:
family = self.dbstate.db.get_family_from_handle(handle)
EditFamily(self.dbstate, self.uistate, [], family)
except WindowActiveError:
pass
def get_image(self, column, cell, model, iter, user_data=None):
the_type = model.get_value(iter, VerifyResults.OBJ_TYPE_COL)
if the_type == 'Person':
cell.set_property('stock-id', 'gramps-person' )
elif the_type == 'Family':
cell.set_property('stock-id', 'gramps-family' )
def add_results(self,results):
(msg,gramps_id, name,the_type,rule_id,severity, handle) = results
ignore = self.get_marking(handle,rule_id)
if severity == Rule.ERROR:
fg = 'red'
# fg = '#8b008b' # purple
# elif severity == Rule.WARNING:
# fg = '#008b00' # green
else:
fg = None
self.real_model.append(row=[ignore,msg,gramps_id, name,
the_type,rule_id, handle,fg,
True, not ignore])
if not self.window_shown:
self.window.show()
self.window_shown = True
def build_menu_names(self, obj):
return (self.title,None)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class VerifyOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
# Options specific for this report
self.options_dict = {
'oldage' : 90,
'hwdif' : 30,
'cspace' : 8,
'cbspan' : 25,
'yngmar' : 17,
'oldmar' : 50,
'oldmom' : 48,
'yngmom' : 17,
'yngdad' : 18,
'olddad' : 65,
'wedder' : 3,
'mxchildmom' : 12,
'mxchilddad' : 15,
'lngwdw' : 30,
'oldunm' : 99,
'estimate_age' : 0,
'invdate' : 1,
}
self.options_help = {
'oldage' : ("=num","Maximum age","Age in years"),
'hwdif' : ("=num","Maximum husband-wife age difference",
"Age difference in years"),
'cspace' : ("=num",
"Maximum number of years between children",
"Number of years"),
'cbspan' : ("=num",
"Maximum span of years for all children",
"Span in years"),
'yngmar' : ("=num","Minimum age to marry","Age in years"),
'oldmar' : ("=num","Maximum age to marry","Age in years"),
'oldmom' : ("=num","Maximum age to bear a child",
"Age in years"),
'yngmom' : ("=num","Minimum age to bear a child",
"Age in years"),
'yngdad' : ("=num","Minimum age to father a child",
"Age in years"),
'olddad' : ("=num","Maximum age to father a child",
"Age in years"),
'wedder' : ("=num","Maximum number of spouses for a person",
"Number of spouses"),
'mxchildmom' : ("=num","Maximum number of children for a woman",
"Number of children"),
'mxchilddad' : ("=num","Maximum number of children for a man",
"Number of chidlren"),
'lngwdw' : ("=num","Maximum number of consecutive years "
"of widowhood before next marriage",
"Number of years"),
            'oldunm'       : ("=num","Maximum age for an unmarried person",
                            "Number of years"),
'estimate_age' : ("=0/1","Whether to estimate missing or inexact dates",
["Do not estimate","Estimate dates"],
True),
            'invdate'      : ("=0/1","Whether to check for invalid dates",
                            ["Do not identify invalid dates",
                             "Identify invalid dates"], True),
}
#-------------------------------------------------------------------------
#
# Base classes for different tests -- the rules
#
#-------------------------------------------------------------------------
class Rule(object):
"""
Basic class for use in this tool.
Other rules must inherit from this.
"""
ID = 0
TYPE = ''
ERROR = 1
WARNING = 2
SEVERITY = WARNING
def __init__(self,db, obj):
self.db = db
self.obj = obj
def broken(self):
"""
Return boolean indicating whether this rule is violated.
"""
return False
def get_message(self):
        assert False, "Needs to be overridden in the derived class"
def get_name(self):
        assert False, "Needs to be overridden in the derived class"
def get_handle(self):
return self.obj.handle
def get_id(self):
return self.obj.gramps_id
def get_level(self):
return Rule.WARNING
def get_rule_id(self):
params = self._get_params()
return (self.ID,params)
def _get_params(self):
return tuple()
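    # get_rule_id() pairs the class ID with the rule's parameters, so an
    # "ignore" mark keyed on it is specific to a rule instance: ignoring
    # OldAge at 90 does not also silence OldAge at a different threshold.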
def report_itself(self):
handle = self.get_handle()
the_type = self.TYPE
rule_id = self.get_rule_id()
severity = self.SEVERITY
name = self.get_name()
gramps_id = self.get_id()
msg = self.get_message()
return (msg,gramps_id, name,the_type,rule_id,severity, handle)
class PersonRule(Rule):
"""
Person-based class.
"""
TYPE = 'Person'
def get_name(self):
return self.obj.get_primary_name().get_name()
class FamilyRule(Rule):
"""
Family-based class.
"""
TYPE = 'Family'
def get_name(self):
return family_name(self.obj,self.db)
#-------------------------------------------------------------------------
#
# Actual rules for testing
#
#-------------------------------------------------------------------------
class BirthAfterBapt(PersonRule):
ID = 1
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db,self.obj)
bapt_date = get_bapt_date(self.db,self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
bapt_ok = bapt_date > 0 if bapt_date is not None else False
return (birth_ok and bapt_ok and birth_date > bapt_date)
def get_message(self):
return _("Baptism before birth")
class DeathBeforeBapt(PersonRule):
ID = 2
SEVERITY = Rule.ERROR
def broken(self):
death_date = get_death_date(self.db,self.obj)
bapt_date = get_bapt_date(self.db,self.obj)
bapt_ok = bapt_date > 0 if bapt_date is not None else False
death_ok = death_date > 0 if death_date is not None else False
return (death_ok and bapt_ok and bapt_date > death_date)
def get_message(self):
return _("Death before baptism")
class BirthAfterBury(PersonRule):
ID = 3
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db, self.obj)
bury_date = get_bury_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return (birth_ok and bury_ok and birth_date > bury_date)
def get_message(self):
return _("Burial before birth")
class DeathAfterBury(PersonRule):
ID = 4
SEVERITY = Rule.ERROR
def broken(self):
death_date = get_death_date(self.db,self.obj)
bury_date = get_bury_date(self.db,self.obj)
death_ok = death_date > 0 if death_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return (death_ok and bury_ok and death_date > bury_date)
def get_message(self):
return _("Burial before death")
class BirthAfterDeath(PersonRule):
ID = 5
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db,self.obj)
death_date = get_death_date(self.db,self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
death_ok = death_date > 0 if death_date is not None else False
return (birth_ok and death_ok and birth_date > death_date)
def get_message(self):
return _("Death before birth")
class BaptAfterBury(PersonRule):
ID = 6
SEVERITY = Rule.ERROR
def broken(self):
bapt_date = get_bapt_date(self.db,self.obj)
bury_date = get_bury_date(self.db,self.obj)
bapt_ok = bapt_date > 0 if bapt_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return (bapt_ok and bury_ok and bapt_date > bury_date)
def get_message(self):
return _("Burial before baptism")
class OldAge(PersonRule):
ID = 7
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_age,est):
PersonRule.__init__(self,db,person)
self.old_age = old_age
self.est = est
def _get_params(self):
return (self.old_age,self.est)
def broken(self):
age_at_death = get_age_at_death(self.db, self.obj, self.est)
return (age_at_death/365 > self.old_age)
def get_message(self):
return _("Old age at death")
class UnknownGender(PersonRule):
ID = 8
SEVERITY = Rule.WARNING
def broken(self):
female = self.obj.get_gender() == Person.FEMALE
male = self.obj.get_gender() == Person.MALE
return not (male or female)
def get_message(self):
return _("Unknown gender")
class MultipleParents(PersonRule):
ID = 9
SEVERITY = Rule.WARNING
def broken(self):
n_parent_sets = len(self.obj.get_parent_family_handle_list())
return (n_parent_sets>1)
def get_message(self):
return _("Multiple parents")
class MarriedOften(PersonRule):
ID = 10
SEVERITY = Rule.WARNING
def __init__(self,db,person,wedder):
PersonRule.__init__(self,db,person)
self.wedder = wedder
def _get_params(self):
return (self.wedder,)
def broken(self):
n_spouses = len(self.obj.get_family_handle_list())
return (n_spouses>self.wedder)
def get_message(self):
return _("Married often")
class OldUnmarried(PersonRule):
ID = 11
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_unm,est):
PersonRule.__init__(self,db,person)
self.old_unm = old_unm
self.est = est
def _get_params(self):
return (self.old_unm,self.est)
def broken(self):
age_at_death = get_age_at_death(self.db,self.obj,self.est)
n_spouses = len(self.obj.get_family_handle_list())
return (age_at_death/365 > self.old_unm and n_spouses==0)
def get_message(self):
return _("Old and unmarried")
class TooManyChildren(PersonRule):
ID = 12
SEVERITY = Rule.WARNING
def __init__(self,db, obj,mx_child_dad,mx_child_mom):
PersonRule.__init__(self,db, obj)
self.mx_child_dad = mx_child_dad
self.mx_child_mom = mx_child_mom
def _get_params(self):
return (self.mx_child_dad,self.mx_child_mom)
def broken(self):
n_child = get_n_children(self.db,self.obj)
        if (self.obj.get_gender() == Person.MALE
and n_child > self.mx_child_dad):
return True
        if (self.obj.get_gender() == Person.FEMALE
and n_child > self.mx_child_mom):
return True
return False
def get_message(self):
return _("Too many children")
class SameSexFamily(FamilyRule):
ID = 13
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
same_sex = (mother and father and
(mother.get_gender() == father.get_gender()))
unknown_sex = (mother and
(mother.get_gender() == Person.UNKNOWN))
return (same_sex and not unknown_sex)
def get_message(self):
return _("Same sex marriage")
class FemaleHusband(FamilyRule):
ID = 14
SEVERITY = Rule.WARNING
def broken(self):
father = get_father(self.db,self.obj)
return (father and (father.get_gender() == Person.FEMALE))
def get_message(self):
return _("Female husband")
class MaleWife(FamilyRule):
ID = 15
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db,self.obj)
return (mother and (mother.get_gender() == Person.MALE))
def get_message(self):
return _("Male wife")
class SameSurnameFamily(FamilyRule):
ID = 16
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
_broken = False
# Make sure both mother and father exist.
if mother and father:
mname = mother.get_primary_name()
fname = father.get_primary_name()
# Only compare birth names (not married names).
if mname.get_type() == NameType.BIRTH and \
fname.get_type() == NameType.BIRTH:
# Empty names don't count.
if len(mname.get_surname()) != 0 and \
len(fname.get_surname()) != 0:
# Finally, check if the names are the same.
if mname.get_surname() == fname.get_surname():
_broken = True
return _broken
def get_message(self):
return _("Husband and wife with the same surname")
class LargeAgeGapFamily(FamilyRule):
ID = 17
SEVERITY = Rule.WARNING
def __init__(self,db, obj, hw_diff,est):
FamilyRule.__init__(self,db, obj)
self.hw_diff = hw_diff
self.est = est
def _get_params(self):
return (self.hw_diff,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
large_diff = \
abs(father_birth_date-mother_birth_date)/365 > self.hw_diff
return (mother_birth_date_ok and father_birth_date_ok and large_diff)
def get_message(self):
return _("Large age difference between spouses")
class MarriageBeforeBirth(FamilyRule):
ID = 18
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok
and (father_birth_date > marr_date))
mother_broken = (mother_birth_date_ok and marr_date_ok
and (mother_birth_date > marr_date))
return (father_broken or mother_broken)
def get_message(self):
return _("Marriage before birth")
class MarriageAfterDeath(FamilyRule):
ID = 19
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_death_date = get_death_date(self.db,mother,self.est)
father_death_date = get_death_date(self.db,father,self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
father_broken = (father_death_date_ok and marr_date_ok
and (father_death_date < marr_date))
mother_broken = (mother_death_date_ok and marr_date_ok
and (mother_death_date < marr_date))
return (father_broken or mother_broken)
def get_message(self):
return _("Marriage after death")
class EarlyMarriage(FamilyRule):
ID = 20
SEVERITY = Rule.WARNING
def __init__(self,db, obj,yng_mar,est):
FamilyRule.__init__(self,db, obj)
self.yng_mar = yng_mar
self.est = est
def _get_params(self):
return (self.yng_mar,self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok and
father_birth_date < marr_date and
((marr_date - father_birth_date)/365 < self.yng_mar))
mother_broken = (mother_birth_date_ok and marr_date_ok and
mother_birth_date < marr_date and
((marr_date - mother_birth_date)/365 < self.yng_mar))
return (father_broken or mother_broken)
def get_message(self):
return _("Early marriage")
class LateMarriage(FamilyRule):
ID = 21
SEVERITY = Rule.WARNING
def __init__(self,db, obj, old_mar,est):
FamilyRule.__init__(self,db, obj)
self.old_mar = old_mar
self.est = est
def _get_params(self):
return (self.old_mar,self.est)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok and
((marr_date - father_birth_date)/365 > self.old_mar))
mother_broken = (mother_birth_date_ok and marr_date_ok and
((marr_date - mother_birth_date)/365 > self.old_mar))
return (father_broken or mother_broken)
def get_message(self):
return _("Late marriage")
## class MarriageBeforePreviousMarrChild(PersonRule):
## def broken(self):
## marr_date = get_marriage_date(self.obj)
## prev_marr_child_date = get_prev_marr_child_date(self.obj)
## return (prev_marr_child_date>marr_date)
## def get_message(self):
## return _("Marriage before having a child from previous marriage")
## class LongWidowhood(FamilyRule):
## def broken(self):
## marr_date = get_marriage_date(self.obj)
## prev_marr_spouse_death_date = get_prev_marr_spouse_death_date(self.obj)
## birth_date = get_birth_date(self.obj)
## return (marr_date-prev_marr_spouse_death_date>lngwdw)
## def get_message(self):
## return _("Long Windowhood")
class OldParent(FamilyRule):
ID = 22
SEVERITY = Rule.WARNING
def __init__(self,db, obj, old_mom, old_dad,est):
FamilyRule.__init__(self,db, obj)
self.old_mom = old_mom
self.old_dad = old_dad
self.est = est
def _get_params(self):
return (self.old_mom,self.old_dad,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok and
((child_birth_date - father_birth_date)/365 > self.old_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok and
((child_birth_date - mother_birth_date)/365 > self.old_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
return _("Old father")
def mother_message(self):
return _("Old mother")
class YoungParent(FamilyRule):
ID = 23
SEVERITY = Rule.WARNING
def __init__(self,db, obj,yng_mom,yng_dad,est):
FamilyRule.__init__(self,db, obj)
self.yng_dad = yng_dad
self.yng_mom = yng_mom
self.est = est
def _get_params(self):
return (self.yng_mom,self.yng_dad,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok and
((child_birth_date - father_birth_date)/365 < self.yng_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok and
((child_birth_date - mother_birth_date)/365 < self.yng_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
return _("Young father")
def mother_message(self):
return _("Young mother")
class UnbornParent(FamilyRule):
ID = 24
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok
and (father_birth_date > child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok
and (mother_birth_date > child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
return _("Unborn father")
def mother_message(self):
return _("Unborn mother")
class DeadParent(FamilyRule):
ID = 25
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_death_date = get_death_date(self.db,mother,self.est)
father_death_date = get_death_date(self.db,father,self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
hasBirthRelToMother = child_ref.mrel == ChildRefType.BIRTH
hasBirthRelToFather = child_ref.frel == ChildRefType.BIRTH
father_broken = (hasBirthRelToFather
and father_death_date_ok
and ((father_death_date + 294) < child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (hasBirthRelToMother
and mother_death_date_ok
and (mother_death_date < child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
return _("Dead father")
def mother_message(self):
return _("Dead mother")
class LargeChildrenSpan(FamilyRule):
ID = 26
SEVERITY = Rule.WARNING
def __init__(self,db, obj,cb_span,est):
FamilyRule.__init__(self,db, obj)
self.cb_span = cb_span
self.est = est
def _get_params(self):
return (self.cb_span,self.est)
def broken(self):
        child_birth_dates = get_child_birth_dates(self.db,self.obj,self.est)
        child_birth_dates.sort()
        return (child_birth_dates and ((child_birth_dates[-1]
                                        - child_birth_dates[0])/365
                                       > self.cb_span))
def get_message(self):
return _("Large year span for all children")
class LargeChildrenAgeDiff(FamilyRule):
ID = 27
SEVERITY = Rule.WARNING
def __init__(self,db, obj,c_space,est):
FamilyRule.__init__(self,db, obj)
self.c_space = c_space
self.est = est
def _get_params(self):
return (self.c_space,self.est)
def broken(self):
        child_birth_dates = get_child_birth_dates(self.db,self.obj,self.est)
        child_birth_dates_diff = [child_birth_dates[i+1] - child_birth_dates[i]
                                  for i in range(len(child_birth_dates)-1)]
        return (child_birth_dates_diff and
                max(child_birth_dates_diff)/365 > self.c_space)
def get_message(self):
return _("Large age differences between children")
class Disconnected(PersonRule):
ID = 28
SEVERITY = Rule.WARNING
def broken(self):
return (len(self.obj.get_parent_family_handle_list())
+ len(self.obj.get_family_handle_list()) == 0)
def get_message(self):
return _("Disconnected individual")
class InvalidBirthDate(PersonRule):
ID = 29
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
if not self._invdate: return False # should we check?
# if so, let's get the birth date
person = self.obj
birth_ref = person.get_birth_ref()
if birth_ref:
birth_event = self.db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
if birth_date and not birth_date.get_valid():
return True
return False
def get_message(self):
return _("Invalid birth date")
class InvalidDeathDate(PersonRule):
ID = 30
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
if not self._invdate: return False # should we check?
# if so, let's get the death date
person = self.obj
death_ref = person.get_death_ref()
if death_ref:
death_event = self.db.get_event_from_handle(death_ref.ref)
death_date = death_event.get_date_object()
if death_date and not death_date.get_valid():
return True
return False
def get_message(self):
return _("Invalid death date")
class MarriedRelation(FamilyRule):
ID = 31
SEVERITY = Rule.WARNING
def __init__(self,db, obj):
FamilyRule.__init__(self,db, obj)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
married = self.obj.get_relationship() == FamilyRelType.MARRIED
if not married and marr_date_ok:
            return True
def get_message(self):
return _("Marriage date but not married")
class OldAgeButNoDeath(PersonRule):
ID = 32
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_age,est):
PersonRule.__init__(self,db,person)
self.old_age = old_age
self.est = est
def _get_params(self):
return (self.old_age,self.est)
def broken(self):
birth_date = get_birth_date(self.db,self.obj,self.est)
dead = get_death_date(self.db,self.obj,True) # if no death use burial
if dead or not birth_date:
return 0
age = ( _today - birth_date ) / 365
return ( age > self.old_age )
def get_message(self):
return _("Old age but no death")
| pmghalvorsen/gramps_branch | gramps/plugins/tool/verify.py | Python | gpl-2.0 | 56,750 | ["Brian"] | 298c9a8825171555ed34b6be6657999ac126b6477b5181fa5b3d1e3cc9dd16d7 |
"""
Utility functions.
@author J. Chiang <jchiang@slac.stanford.edu>
"""
#
# $Id: utilities.py,v 1.2 2003/10/05 16:30:40 jchiang Exp $
#
import math, numarray
def locate(x, xx):
"""
Binary search algorithm.
usage: indx = locate(x, xx)
x = input array; it must be in ascending order.
xx = the search value
indx = the index such that x[indx] < xx < x[indx+1];
0 if xx <= x[0]
len(x) - 2 if xx >= x[-1]
"""
imin = 0
imax = len(x) - 1
if (xx <= x[imin]): return imin
if (xx >= x[-1]): return imax-1
while (x[imin] < xx < x[imax]):
inew = (imin + imax)/2
if (xx > x[inew]): imin = inew
if (xx == x[inew] or imax-imin == 1): return inew
if (xx < x[inew]): imax = inew
return inew
def interpolate(x, y, xx):
"""
Linear interpolation.
usage: yy = interpolate(x, y, xx)
x = abscissa array
y = ordinate array
xx = desired abscissa value
yy = desired ordinate value
"""
i = locate(x, xx)
return (xx - x[i])/(x[i+1] - x[i])*(y[i+1] - y[i]) + y[i]
class gaussian:
"""
1D Gaussian function class.
usage: f = gaussian(mean, sigma);
y = f(x), where x is a single value or a list and y returns
as a like object.
yint = f.integral(xmin, xmax)
"""
import numarray
def __init__(self, mean, sigma):
self.mean = mean
self.sigma = sigma
def __call__(self, x):
if (type(x) == type(1) or type(x) == type(1.)):
return self.value(x)
elif (type(x) == type([])):
my_list = []
for xx in x:
my_list.append(self.value(xx))
return my_list
elif (type(x) == numarray.NumArray):
return (numarray.exp(-numarray.power((x-self.mean)/self.sigma, 2)/2.)
/numarray.sqrt(2.*numarray.pi)/self.sigma)
def value(self, x):
return (math.exp( -math.pow((x - self.mean)/self.sigma, 2)/2. )
/math.sqrt(2.*math.pi)/self.sigma)
def integral(self, xmin, xmax):
zmin = (xmin - self.mean)/math.sqrt(2.)/self.sigma
zmax = (xmax - self.mean)/math.sqrt(2.)/self.sigma
return (self.erfcc(zmin) - self.erfcc(zmax))/2.
def erfcc(self, x):
z = abs(x)
t = 1./(1. + z/2.)
ans = ( t*math.exp(-z*z-1.26551223+t*(1.00002368+
t*(0.37409196+t*(0.09678418+t*(-0.18628806+
t*(0.27886807+t*(-1.13520398+t*(1.48851587+
t*(-0.82215223+t*0.17087277))))))))) )
if x >= 0:
return ans
else:
return 2. - ans
class histogram:
"""
A simple histogramming class.
usage: my_hist = histogram(xmin=0, xmax=1, nbins=30)
my_hist.fill(x) # add an entry
x = my_hist.abscissa() # return a numarray of x-axis values
y = my_hist[i] # index operator for accessing bin values
my_hist.min() # minimum and maximum bin values
my_hist.max()
"""
def __init__(self, xmin=0, xmax=1, nbins=30):
self.xmin = xmin
self.xmax = xmax
self.nbins = nbins
self.xstep = float(xmax - xmin)/float(nbins)
self.hist = numarray.zeros(nbins)
def fill(self, x):
if x > self.xmin and x < self.xmax:
indx = int((x - self.xmin)/self.xstep)
if indx < self.nbins: self.hist[indx] = self.hist[indx] + 1
def abscissa(self):
return (numarray.arange(self.nbins)*self.xstep + self.xstep/2.
+ self.xmin)
def __getitem__(self, index):
return self.hist[index]
def min(self):
return min(self.hist)
def max(self):
return max(self.hist)
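if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # exercise locate/interpolate, the gaussian class and the histogram class.
    xs = [0.0, 1.0, 2.0, 3.0]
    ys = [0.0, 1.0, 4.0, 9.0]
    print locate(xs, 2.5)             # -> 2, since xs[2] < 2.5 < xs[3]
    print interpolate(xs, ys, 2.5)    # -> 6.5, linear between (2, 4) and (3, 9)
    g = gaussian(mean=0., sigma=1.)
    print g(0.), g.integral(-1., 1.)  # peak value (~0.399) and ~0.683
    h = histogram(xmin=0., xmax=1., nbins=10)
    for v in [0.05, 0.15, 0.17, 0.95]:
        h.fill(v)
    print h.abscissa(), h.max()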
| plasmodic/hippodraw | python/utilities.py | Python | gpl-2.0 | 3,773 | ["Gaussian"] | ae69458df92e4e994d905f934c3a30b4d5de34d70704c5a10203eaf89dc7eba6 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBiovizbase(RPackage):
"""The biovizBase package is designed to provide a set of
utilities, color schemes and conventions for genomic data.
It serves as the base for various high-level packages for
biological data visualization. This saves development effort
and encourages consistency."""
homepage = "http://bioconductor.org/packages/biovizBase/"
git = "https://git.bioconductor.org/packages/biovizBase.git"
version('1.24.0', commit='ae9cd2ff665b74a8f45ed9c1d17fc0a778b4af6c')
depends_on('r@3.4.0:3.4.9', when='@1.24.0')
depends_on('r-scales', type=('build', 'run'))
depends_on('r-hmisc', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-dichromat', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-variantannotation', type=('build', 'run'))
depends_on('r-ensembldb', type=('build', 'run'))
depends_on('r-annotationfilter', type=('build', 'run'))
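    # Illustrative note: with this recipe in place the package would normally be
    # built through the standard Spack command line, e.g. "spack install r-biovizbase".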
| mfherbst/spack | var/spack/repos/builtin/packages/r-biovizbase/package.py | Python | lgpl-2.1 | 2,832 | ["Bioconductor"] | eb8707e5a037e07ab64d4b3c5fbd9f81c8dd91da9885903235bcc986498da96e |
"""scandir, a better directory iterator and faster os.walk(), now in the Python 3.5 stdlib
scandir() is a generator version of os.listdir() that returns an
iterator over files in a directory, and also exposes the extra
information most OSes provide while iterating files in a directory
(such as type and stat information).
This module also includes a version of os.walk() that uses scandir()
to speed it up significantly.
See README.md or https://github.com/benhoyt/scandir for rationale and
docs, or read PEP 471 (https://www.python.org/dev/peps/pep-0471/) for
more details on its inclusion into Python 3.5
scandir is released under the new BSD 3-clause license. See
LICENSE.txt for the full license text.
"""
from __future__ import division
from errno import ENOENT
from os import listdir, lstat, stat, strerror
from os.path import join, islink
from stat import S_IFDIR, S_IFLNK, S_IFREG
import collections
import os
import sys
try:
import _scandir
except ImportError:
_scandir = None
try:
import ctypes
except ImportError:
ctypes = None
if _scandir is None and ctypes is None:
import warnings
warnings.warn("scandir can't find the compiled _scandir C module "
"or ctypes, using slow generic fallback")
__version__ = '1.6'
__all__ = ['scandir', 'walk']
# Windows FILE_ATTRIBUTE constants for interpreting the
# FIND_DATA.dwFileAttributes member
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_VIRTUAL = 65536
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
unicode = str # Because Python <= 3.2 doesn't have u'unicode' syntax
class GenericDirEntry(object):
__slots__ = ('name', '_stat', '_lstat', '_scandir_path', '_path')
def __init__(self, scandir_path, name):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
self._stat = stat(self.path)
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
def is_dir(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFDIR
def is_file(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFREG
def is_symlink(self):
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFLNK
def inode(self):
st = self.stat(follow_symlinks=False)
return st.st_ino
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def _scandir_generic(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
for name in listdir(path):
yield GenericDirEntry(path, name)
if IS_PY3 and sys.platform == 'win32':
def scandir_generic(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_generic(path)
scandir_generic.__doc__ = _scandir_generic.__doc__
else:
scandir_generic = _scandir_generic
scandir_c = None
scandir_python = None
if sys.platform == 'win32':
if ctypes is not None:
from ctypes import wintypes
# Various constants from windows.h
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
ERROR_FILE_NOT_FOUND = 2
ERROR_NO_MORE_FILES = 18
IO_REPARSE_TAG_SYMLINK = 0xA000000C
        # Number of seconds between 1601-01-01 and 1970-01-01
SECONDS_BETWEEN_EPOCHS = 11644473600
kernel32 = ctypes.windll.kernel32
# ctypes wrappers for (wide string versions of) FindFirstFile,
# FindNextFile, and FindClose
FindFirstFile = kernel32.FindFirstFileW
FindFirstFile.argtypes = [
wintypes.LPCWSTR,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindFirstFile.restype = wintypes.HANDLE
FindNextFile = kernel32.FindNextFileW
FindNextFile.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindNextFile.restype = wintypes.BOOL
FindClose = kernel32.FindClose
FindClose.argtypes = [wintypes.HANDLE]
FindClose.restype = wintypes.BOOL
Win32StatResult = collections.namedtuple('Win32StatResult', [
'st_mode',
'st_ino',
'st_dev',
'st_nlink',
'st_uid',
'st_gid',
'st_size',
'st_atime',
'st_mtime',
'st_ctime',
'st_atime_ns',
'st_mtime_ns',
'st_ctime_ns',
'st_file_attributes',
])
def filetime_to_time(filetime):
"""Convert Win32 FILETIME to time since Unix epoch in seconds."""
total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime
return total / 10000000 - SECONDS_BETWEEN_EPOCHS
def find_data_to_stat(data):
"""Convert Win32 FIND_DATA struct to stat_result."""
# First convert Win32 dwFileAttributes to st_mode
attributes = data.dwFileAttributes
st_mode = 0
if attributes & FILE_ATTRIBUTE_DIRECTORY:
st_mode |= S_IFDIR | 0o111
else:
st_mode |= S_IFREG
if attributes & FILE_ATTRIBUTE_READONLY:
st_mode |= 0o444
else:
st_mode |= 0o666
if (attributes & FILE_ATTRIBUTE_REPARSE_POINT and
data.dwReserved0 == IO_REPARSE_TAG_SYMLINK):
st_mode ^= st_mode & 0o170000
st_mode |= S_IFLNK
st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow
st_atime = filetime_to_time(data.ftLastAccessTime)
st_mtime = filetime_to_time(data.ftLastWriteTime)
st_ctime = filetime_to_time(data.ftCreationTime)
# Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev,
# st_nlink, st_uid, st_gid
return Win32StatResult(st_mode, 0, 0, 0, 0, 0, st_size,
st_atime, st_mtime, st_ctime,
int(st_atime * 1000000000),
int(st_mtime * 1000000000),
int(st_ctime * 1000000000),
attributes)
class Win32DirEntryPython(object):
__slots__ = ('name', '_stat', '_lstat', '_find_data', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, find_data):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._find_data = find_data
self._path = None
self._inode = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
# It's a symlink, call link-following stat()
self._stat = stat(self.path)
else:
# Not a symlink, stat is same as lstat value
if self._lstat is None:
self._lstat = find_data_to_stat(self._find_data)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
# Lazily convert to stat object, because it's slow
# in Python, and often we only need is_dir() etc
self._lstat = find_data_to_stat(self._find_data)
return self._lstat
def is_dir(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFDIR
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY != 0)
def is_file(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFREG
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY == 0)
def is_symlink(self):
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_REPARSE_POINT != 0 and
self._find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
def inode(self):
if self._inode is None:
self._inode = lstat(self.path).st_ino
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def win_error(error, filename):
exc = WindowsError(error, ctypes.FormatError(error))
exc.filename = filename
return exc
def _scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
# Call FindFirstFile and handle errors
if isinstance(path, bytes):
is_bytes = True
filename = join(path.decode('mbcs', 'strict'), '*.*')
else:
is_bytes = False
filename = join(path, '*.*')
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
handle = FindFirstFile(filename, data_p)
if handle == INVALID_HANDLE_VALUE:
error = ctypes.GetLastError()
if error == ERROR_FILE_NOT_FOUND:
# No files, don't yield anything
return
raise win_error(error, path)
# Call FindNextFile in a loop, stopping when no more files
try:
while True:
# Skip '.' and '..' (current and parent directory), but
# otherwise yield (filename, stat_result) tuple
name = data.cFileName
if name not in ('.', '..'):
if is_bytes:
name = name.encode('mbcs', 'replace')
yield Win32DirEntryPython(path, name, data)
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
success = FindNextFile(handle, data_p)
if not success:
error = ctypes.GetLastError()
if error == ERROR_NO_MORE_FILES:
break
raise win_error(error, path)
finally:
if not FindClose(handle):
raise win_error(ctypes.GetLastError(), path)
if IS_PY3:
def scandir_python(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_python(path)
scandir_python.__doc__ = _scandir_python.__doc__
else:
scandir_python = _scandir_python
if _scandir is not None:
scandir_c = _scandir.scandir
if _scandir is not None:
scandir = scandir_c
elif ctypes is not None:
scandir = scandir_python
else:
scandir = scandir_generic
# Linux, OS X, and BSD implementation
elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform:
have_dirent_d_type = (sys.platform != 'sunos5')
if ctypes is not None and have_dirent_d_type:
import ctypes.util
DIR_p = ctypes.c_void_p
# Rather annoying how the dirent struct is slightly different on each
# platform. The only fields we care about are d_name and d_type.
class Dirent(ctypes.Structure):
if sys.platform.startswith('linux'):
_fields_ = (
('d_ino', ctypes.c_ulong),
('d_off', ctypes.c_long),
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
else:
_fields_ = (
('d_ino', ctypes.c_uint32), # must be uint32, not ulong
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_namlen', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
DT_UNKNOWN = 0
DT_DIR = 4
DT_REG = 8
DT_LNK = 10
Dirent_p = ctypes.POINTER(Dirent)
Dirent_pp = ctypes.POINTER(Dirent_p)
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
opendir = libc.opendir
opendir.argtypes = [ctypes.c_char_p]
opendir.restype = DIR_p
readdir_r = libc.readdir_r
readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp]
readdir_r.restype = ctypes.c_int
closedir = libc.closedir
closedir.argtypes = [DIR_p]
closedir.restype = ctypes.c_int
file_system_encoding = sys.getfilesystemencoding()
class PosixDirEntry(object):
__slots__ = ('name', '_d_type', '_stat', '_lstat', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, d_type, inode):
self._scandir_path = scandir_path
self.name = name
self._d_type = d_type
self._inode = inode
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
self._stat = stat(self.path)
else:
if self._lstat is None:
self._lstat = lstat(self.path)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
def is_dir(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFDIR
else:
return self._d_type == DT_DIR
def is_file(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFREG
else:
return self._d_type == DT_REG
def is_symlink(self):
if self._d_type == DT_UNKNOWN:
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFLNK
else:
return self._d_type == DT_LNK
def inode(self):
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def posix_error(filename):
errno = ctypes.get_errno()
exc = OSError(errno, strerror(errno))
exc.filename = filename
return exc
def scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
if isinstance(path, bytes):
opendir_path = path
is_bytes = True
else:
opendir_path = path.encode(file_system_encoding)
is_bytes = False
dir_p = opendir(opendir_path)
if not dir_p:
raise posix_error(path)
try:
result = Dirent_p()
while True:
entry = Dirent()
if readdir_r(dir_p, entry, result):
raise posix_error(path)
if not result:
break
name = entry.d_name
if name not in (b'.', b'..'):
if not is_bytes:
name = name.decode(file_system_encoding)
yield PosixDirEntry(path, name, entry.d_type, entry.d_ino)
finally:
if closedir(dir_p):
raise posix_error(path)
if _scandir is not None:
scandir_c = _scandir.scandir
if _scandir is not None:
scandir = scandir_c
elif ctypes is not None:
scandir = scandir_python
else:
scandir = scandir_generic
# Some other system -- no d_type or stat information
else:
scandir = scandir_generic
def _walk(top, topdown=True, onerror=None, followlinks=False):
"""Like Python 3.5's implementation of os.walk() -- faster than
the pre-Python 3.5 version as it uses scandir() internally.
"""
dirs = []
nondirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour than os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour than
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
for entry in walk(entry.path, topdown, onerror, followlinks):
yield entry
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
for name in dirs:
new_path = join(top, name)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
for entry in walk(new_path, topdown, onerror, followlinks):
yield entry
else:
# Yield after recursion if going bottom up
yield top, dirs, nondirs
if IS_PY3 or sys.platform != 'win32':
walk = _walk
else:
# Fix for broken unicode handling on Windows on Python 2.x, see:
# https://github.com/benhoyt/scandir/issues/54
file_system_encoding = sys.getfilesystemencoding()
def walk(top, topdown=True, onerror=None, followlinks=False):
if isinstance(top, bytes):
top = top.decode(file_system_encoding)
return _walk(top, topdown, onerror, followlinks)
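if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # list the current directory with scandir(), then walk it recursively.
    for entry in scandir('.'):
        print('%s dir=%s file=%s' % (entry.name, entry.is_dir(), entry.is_file()))
    for root, dirs, files in walk('.'):
        print('%s: %d dirs, %d files' % (root, len(dirs), len(files)))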
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/scandir.py | Python | apache-2.0 | 23,805 | ["VisIt"] | bbf0b064748b93a19a61cf7c9b17f00b9473d21f30036b19bc9a58e5caa01592 |
from django.db import models
from django.contrib.auth.models import User
class ParamDict(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class KeyVal(models.Model):
container = models.ForeignKey(ParamDict, db_index=True)
key = models.CharField(max_length=240, db_index=True)
value = models.CharField(max_length=240, db_index=True)
# references = models.ManyToManyField(Reference)
def __unicode__(self):
return self.container.name + "[" + self.key + "=" + self.value + "]"
Channel_Type_CHOICES = (
('Calcium Channel', 'Calcium Channel'),
('Potassium Channel', 'Potassium Channel'),
('Transient Receptor Potential Channel', 'Transient Receptor Potential Channel'),
('Cyclic Nucleotide-Gated Channel', 'Cyclic Nucleotide-Gated Channel'),
('Ligand-Gated Ion Channel', 'Ligand-Gated Ion Channel'),
('Ionotropic Glutamate Receptors', 'Ionotropic Glutamate Receptors'),
('DEG/ENaC Channels', 'DEG/ENaC Channels'),
('Chloride Channel', 'Chloride Channel')
)
# Channel_Subype_CHOICES = (
# ('CaV', 'Voltage-gated'),
# ('KV1', 'Voltage-gated, Shaker/Kv1'),
# ('KV2', 'Voltage-gated, Shab/Kv2'),
# ('KV3', 'Voltage-gated, Shaw/Kv3'),
# ('KV4', 'Voltage-gated, Shal/Kv4'),
# ('KQT', 'KQT'),
# ('KV10-12', 'Voltage-gated, Eag-like/Kv10-12'),
# ('KCa-Slo', 'Calcium-activated Slo'),
# ('KCa-SK', 'Calcium-activated SK'),
# ('TWK', 'TWK'),
# ('Kir', 'Inward-Rectifier'),
# ('Cation, TRP', 'Transient Receptor Potential Cation Channel'),
# ('CNG', 'Cyclic Nucleotide-Gated Channel'),
# ('LGIC', 'Ligand-Gated Ion Channel'),
# ('iGluRs', 'Ionotropic Glutamate Receptors'),
# ('DEG/ENaC/ASIC', 'DEGenerin/Epithelial Na+ Channels/Acid Sensing Ion Channels'),
# ('CLC', 'Chloride Channels And Transporters'),
# ('Auxiliary', 'Auxiliary subunit')
# )
Ion_Type_CHOICES = (
('Ca2+', 'Calcium'),
('K+', 'Potassium'),
('Cl-', 'Chloride'),
    ('Na+', 'Sodium'),
('Cation', 'Cation')
)
Ligand_Type_CHOICES = (
('ATP', 'ATP'),
('Glutamate', 'Glutamate'),
('Acetylcholine', 'Acetylcholine'),
('Serotonin', 'Serotonin'),
('Tyramine', 'Tyramine')
)
class IonChannel(models.Model):
channel_name = models.CharField(null=True, max_length=300)
description = models.TextField(blank=True, null=True)
description_evidences = models.TextField(blank=True, null=True,verbose_name='PMID for description evidence')
channel_type = models.CharField(blank=True, null=True, max_length=300,choices=Channel_Type_CHOICES)
channel_subtype = models.CharField(blank=True, null=True, max_length=300)
ion_type = models.CharField(blank=True, null=True, max_length=200,choices=Ion_Type_CHOICES)
ligand_type = models.CharField(blank=True, null=True, max_length=200,choices=Ligand_Type_CHOICES)
gene_name = models.CharField(blank=True, null=True, max_length=300)
gene_WB_ID = models.CharField(blank=True, null=True, max_length=300)
gene_class = models.CharField(blank=True, null=True, max_length=300)
proteins = models.CharField(blank=True, null=True, max_length=300)
expression_pattern = models.TextField(blank=True, null=True)
expression_evidences = models.TextField(blank=True, null=True,verbose_name='PMID for expression evidence')
last_update = models.DateTimeField(auto_now=True, null=True)
def __unicode__(self):
return self.channel_name
# TODO: Get cells from PyOW
Cell_Type_CHOICES = (
('Muscle', 'Muscle'),
('Neuron', 'Neuron'),
('Motor Neuron', 'Motor Neuron'),
('Xenopus Oocyte', 'Xenopus Oocyte'),
('Generic', 'Generic'),
)
class Cell(models.Model):
cell_name = models.CharField(max_length=300,unique=True,default='Generic')
cell_type = models.CharField(max_length=300,choices=Cell_Type_CHOICES,default='Generic')
ion_channels = models.ManyToManyField(IonChannel,blank=True)
membrane_capacitance = models.FloatField(max_length=200,blank=True, null=True,verbose_name='Capacitance of the membrane (F)')
specific_capacitance = models.FloatField(default=0.01,blank=True, null=True,verbose_name='Specific capacitance of the membrane (F/m2)')
area = models.FloatField(default=6e-9, blank=True, null=True,verbose_name='Total area of the cell (m2)')
def __unicode__(self):
return self.cell_type + ", " + self.cell_name
#
# class CellChannel(models.Model):
# cell = models.ForeignKey(Cell)
# ion_channel = models.ForeignKey(IonChannel)
# channel_density = models.FloatField(blank=True, null=True,verbose_name='Density of the channel in cell (1/m2)')
# e_rev = models.FloatField(blank=True, null=True,verbose_name='Reversal potential of the channel in cell (V)')
# reference = models.ManyToManyField(Reference)
#
# def __unicode__(self):
# return `self.cell` + ", " + `self.ion_channel`
Reference_Type_CHOICES = (
('Genomics', 'Genomics'),
('Proteomics', 'Proteomics'),
('Electrophysiology', 'Electrophysiology'),
('Other', 'Other')
)
class Reference(models.Model):
doi = models.CharField(max_length=300,unique=True)
PMID = models.CharField(max_length=300,blank=True, null=True)
title = models.TextField(blank=True, null=True)
citation = models.TextField(blank=True, null=True)
year = models.CharField(max_length=300,blank=True, null=True)
authors = models.CharField(max_length=300,blank=True, null=True)
journal = models.CharField(max_length=300,blank=True, null=True)
volume = models.CharField(max_length=300,blank=True, null=True)
issue = models.CharField(max_length=300,blank=True, null=True)
pages = models.CharField(max_length=300,blank=True, null=True)
url = models.URLField(blank=True, null=True)
create_date = models.DateTimeField(auto_now=True)
username = models.ForeignKey(User,verbose_name='Contributor')
ion_channels = models.ManyToManyField(IonChannel,blank=True)
cells = models.ManyToManyField(Cell,blank=True)
subject = models.CharField(max_length=300,choices=Reference_Type_CHOICES)
file_url = models.URLField(blank=True, null=True)
def __unicode__(self):
return self.doi + ": " + self.citation + ", " + self.year
class CellChannel(models.Model):
cell = models.ForeignKey(Cell)
ion_channel = models.ForeignKey(IonChannel)
channel_density = models.FloatField(blank=True, null=True,verbose_name='Density of the channel in cell (1/m2)')
reference = models.ForeignKey(Reference)
def __unicode__(self):
return "self.cell" + ", " + "self.ion_channel"
# TODO: Separate experiment conditions from patch clamp
class Experiment(models.Model):
reference = models.ForeignKey(Reference)
create_date = models.DateTimeField()
last_update = models.DateTimeField(auto_now=True)
    username = models.ForeignKey(User,verbose_name='Contributor')
comments = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.reference.doi + ": " + self.reference.citation + ", " + self.reference.year
PatchClamp_Type_CHOICES = (
('VClamp', 'Voltage-Clamp'),
('IClamp', 'Current-Clamp')
)
Patch_Type_CHOICES = (
('Whole-cell', 'Whole-cell'),
('Single-channel', 'Single-channel')
)
# TODO: Consider measurement fields: https://pypi.python.org/pypi/django-measurement
class PatchClamp(models.Model):
experiment = models.ForeignKey(Experiment)
ion_channel = models.ForeignKey(IonChannel)
type = models.CharField(max_length=200,choices=PatchClamp_Type_CHOICES)
patch_type = models.CharField(max_length=200,choices=Patch_Type_CHOICES)
cell = models.ForeignKey(Cell, blank=True, null=True,verbose_name='Type of the cell (e.g. muscle, ADAL, Xenopus Oocyte)')
duration = models.FloatField(verbose_name='Patch-Clamp Duration (ms)')
deltat = models.FloatField(default=0.01, verbose_name='Time interval-Deltat (ms)')
start_time = models.FloatField(default=0,verbose_name='Start time (ms)')
end_time = models.FloatField(verbose_name='End time (ms) (default=duration)')
protocol_start = models.FloatField(verbose_name='Initial holding potential or stimulated current (mV or pA)')
protocol_end = models.FloatField(verbose_name='End of Holding potential or stimulated current (mV or pA)')
protocol_step = models.FloatField(verbose_name='Steps of Holding potential or stimulated current (mV or pA)')
cell_age = models.FloatField(default=None, blank=True, null=True,verbose_name='Age of the cell (days)')
membrane_capacitance = models.FloatField(max_length=200,blank=True, null=True,verbose_name='Capacitance of the membrane (F)')
temperature = models.FloatField(default=21, blank=True, null=True,verbose_name='Temperature (Celsius)')
initial_voltage = models.FloatField(blank=True, null=True, verbose_name='Initial holding potential (mV)')
Ca_concentration = models.FloatField(default=None, blank=True, null=True,verbose_name='Initial molar concentration of Calcium (uM)')
Cl_concentration = models.FloatField(default=None, blank=True, null=True,verbose_name='Initial molar concentration of Chloride (mM)')
mutants = models.CharField(max_length=300, blank=True, null=True, verbose_name='Additional ion channel mutants (e.g. nf100,n582,...)')
blockers = models.CharField(max_length=300, blank=True, null=True, verbose_name='Ion channel blockers (e.g. 500e-6 Cd2+,...)')
extra_solution = models.TextField(blank=True, null=True, verbose_name='Extracellular Solution (e.g. 140e-3 NaCl, 5e-3 KCl,...)')
pipette_solution = models.TextField(blank=True, null=True, verbose_name='Pipette Solution (e.g. 120e-3 KCl, 20e-3 KOH,...)')
def __unicode__(self):
return "self.ion_channel" + " " + "self.experiment" + " " + self.type
# TODO: consider multiple channels
Axis_Type_CHOICES = (
('I', 'Current'),
('I_ss', 'Steady-state Current'),
('I_peak', 'Peak Current'),
('I_norm', 'Normalized Current'),
('V', 'Voltage'),
('T', 'Time'),
('G', 'Conductance'),
('G/G_max', 'G/G_max'),
('Po', 'Open Probability'),
('Po_Peak', 'Peak Open Probability'),
('Ca_concentration', 'Calcium Concentration'),
('Cl_concentration', 'Chloride Concentration'),
('Bar', 'Bar Chart'),
)
class Graph(models.Model):
experiment = models.ForeignKey(Experiment, null=True, blank=True)
patch_clamp = models.ForeignKey(PatchClamp, null=True, blank=True)
x_axis_type = models.CharField(max_length=50, choices=Axis_Type_CHOICES)
x_axis_unit = models.CharField(max_length=50,verbose_name='Axis unit in the original figure (e.g. ms)')
x_axis_toSI = models.FloatField(default=1,verbose_name='Multiply by this value to convert to SI (e.g. 1e-3)')
y_axis_type = models.CharField(max_length=50, choices=Axis_Type_CHOICES)
y_axis_unit = models.CharField(max_length=50,verbose_name='Axis unit in the original figure (e.g. mV)')
y_axis_toSI = models.FloatField(default=1,verbose_name='Multiply by this value to convert to SI (e.g. 1e-3)')
ion_channel = models.ManyToManyField(IonChannel)
mutants = models.CharField(max_length=300, blank=True, null=True, verbose_name='Additional ion channel mutants (e.g. nf100,n582)')
figure_ref_address = models.CharField(max_length=50,verbose_name='Figure number (e.g. 2A)')
figure_ref_caption = models.TextField(verbose_name='Figure caption')
file = models.ImageField(upload_to='ion_channel/graph/%Y/%m/%d')
def __unicode__(self):
return str(self.y_axis_type) + "/" + str(self.x_axis_type) + " relation, Fig. " + \
str(self.figure_ref_address) + ", From: " + self.experiment.reference.citation + \
", " + self.experiment.reference.year
class GraphData(models.Model):
graph = models.ForeignKey(Graph)
series_name = models.CharField(max_length=200)
series_data = models.TextField()
def __unicode__(self):
return self.series_name
def asarray(self):
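        # Illustrative note: series_data is expected to hold one "x,y" pair per
        # line (e.g. "0,1\n0.5,2"), so asarray() returns [[0.0, 1.0], [0.5, 2.0]].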
xy = self.series_data.splitlines()
data = list()
for row in xy:
data += [map(float, row.split(','))]
return data
Modeling_Method_CHOICES = (
('Experimental', 'Experimental'),
('Estimated', 'Estimated')
)
Model_Type_CHOICES = (
('HH', 'Hodgkin-Huxley'),
('Markov', 'Markov')
)
class IonChannelModel(models.Model):
channel_name = models.ForeignKey(IonChannel)
cell_name = models.ForeignKey(Cell, blank=True, null=True)
model_type = models.CharField(max_length=300,choices=Model_Type_CHOICES, default='HH')
modeling_type = models.CharField(max_length=300,choices=Modeling_Method_CHOICES,default='Experimental')
experiment = models.ForeignKey(Experiment)
graphs = models.ManyToManyField(Graph)
username = models.ManyToManyField(User,verbose_name='Curator(s)')
date = models.DateTimeField(auto_now=True)
parameters = models.ManyToManyField(ParamDict)
score = models.FloatField(default=None, blank=True, null=True,verbose_name='Evaluated Score')
neuroML_file = models.FilePathField(blank=True, null=True)
references = models.ManyToManyField(Reference)
def __unicode__(self):
return "self.channel_name" + " " + "self.experiment"
class Protein(models.Model):
name = models.CharField(max_length=300, unique=True)
ion_channel = models.ForeignKey(IonChannel)
sequence = models.TextField(blank=True, null=True)
fasta = models.TextField(blank=True, null=True)
gi = models.CharField(max_length=300,blank=True, null=True,verbose_name='GI number')
uniprot_ID = models.CharField(blank=True, null=True, max_length=300)
wb_ID = models.CharField(blank=True, null=True, max_length=300)
pdb_ID = models.CharField(blank=True, null=True, max_length=300)
interpro_ID = models.CharField(blank=True, null=True, max_length=300)
structure = models.TextField(blank=True, null=True)
structure_image = models.ImageField(blank=True, null=True, upload_to='ion_channel/structures/')
def __unicode__(self):
return self.name
| gsarma/ChannelWorm | channelworm/ion_channel/models.py | Python | mit | 14,099 | ["NEURON"] | a65f5aa98625d38b60e89306bfbf4ad2fa6ed1080e71a6ebc506a48041ddccce |
# Copyright (c) 2013-2016, Massachusetts Institute of Technology
# Copyright (c) 2016-2022, Alex Gorodetsky
#
# This file is part of GPEXP:
# Author: Alex Gorodetsky alex@alexgorodetsky
#
# GPEXP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# GPEXP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GPEXP. If not, see <http://www.gnu.org/licenses/>.
# Code
import numpy as np
import math
def sampleLHS(numPts):
#1D
segSize = 1.0/float(numPts)
pointVal = np.zeros(numPts)
for ii in range(numPts):
segMin = float(ii) * segSize
point = segMin + (np.random.random() * segSize)
pointVal[ii] = point #(point #* (1 - -1)) + -1
return pointVal
def genSample2DCircle(size, radius):
#2Dcircle centered at 0
#implemented using rejection sampling
numPoints = size[0]
numDims = size[1]
samples = np.zeros(size)
for ii in range(numPoints):
notValid = 1
while notValid:
s = np.random.uniform(-1, 1, numDims)
if np.sqrt(s[0]**2.0 + s[1]**2.0) < radius:
notValid = 0
samples[ii,:] = s
return samples
def genSampleNDCircle(size, radius):
numPoints = size[0]
numDims = size[1]
samples = np.zeros(size)
for ii in range(numPoints):
notValid = 1
while notValid:
s = np.random.uniform(-1,1,numDims)
if np.sqrt(np.sum(s**2.0)) < radius:
notValid = 0
samples[ii,:] = s
return samples
def distribFunc2DCircle(points, radius):
    area = np.pi*radius**2.0  # area of a disc of radius r is pi * r**2
prob = 1.0/area
out = np.zeros((len(points)))
out[points[:,0]**2.0 + points[:,1]**2.0 < radius**2.0] = prob
return out
def distribFuncNDCircle(points, radius):
nDim = points.shape[1]
area = np.pi**(nDim/2.0)/math.gamma(nDim/2.0 + 1)*radius**nDim
prob = 1.0/area
out = np.zeros((len(points)))
    out[np.sum(points**2.0, axis=1) < radius**2.0] = prob
return out
def genSampleUniform(size):
# uniform on [-1, 1]
return np.random.rand(size[0],size[1])*2.0 - 1.0
def distribFuncUniform(points):
out = np.array(np.fabs(points)<1) / 2.0
return out
def distribFuncUniform2D(points):
out = np.array(np.fabs(points)<1) / 4.0
return out
def distribFunctionUniformND(points, dim):
out = np.array(np.fabs(points)<1) / (2.0**dim)
return out
def genSampleMickey(size):
centersIn = [ (0.5,0), (-0.5,0), (0.0,-0.5) ]
radiusesIn = [ 0.3, 0.3, 0.5 ]
centersOut = [ (0.0,-0.5) , (0.2, -0.3 ) , (-0.2, -0.3 ) ]
radiusesOut = [ 0.05, 0.1, 0.1]
#circles = { (0.5,0) : 0.3, (-0.5,0) : 0.3, (0.0,-0.5) : 0.5, (0.0, -0.5): 0.05 ,
# (0.2, -0.3 ) : 0.1, (-0.2, -0.3 ): 0.1}
ellipseCenters = [ 0, -0.75]
ellipseRad = [ 0.2, 0.1]
nSamples = size[0]
samplesKeep = np.zeros((0,2))
while len(samplesKeep) < nSamples:
samples = np.random.rand(nSamples,2)*2.0-1.0
samplesOk = np.zeros((0,2))
#now need to do rejection sampling
#algorithm will check if its in any of the circles
for center, rad in zip(centersIn, radiusesIn):
rSquared = (samples[:,0]- center[0])**2.0 + (samples[:,1] - center[1])**2.0
#print "rsquared ", np.nonzero(rSquared< rad**2)
if len(rSquared) > 0:
ptsok = samples[rSquared < rad**2]
samplesOk = np.concatenate((samplesOk, ptsok), axis=0)
samples = samples[rSquared > rad**2]
for center, rad in zip(centersOut, radiusesOut):
rSquared = (samplesOk[:,0]- center[0])**2.0 + (samplesOk[:,1] - center[1])**2.0
if len(rSquared) > 0:
samplesIn = samplesOk[rSquared < rad**2.0]
samples = np.concatenate((samples, samplesIn ), axis=0)
samplesOk = samplesOk[rSquared > rad**2]
#check ellipse
checkIn = (samplesOk[:,0] - ellipseCenters[0])**2.0/ellipseRad[0]**2 + \
(samplesOk[:,1] - ellipseCenters[1])**2.0/ellipseRad[1]**2
if len(checkIn) > 0:
samplesIn = samplesOk[checkIn<1]
samplesOk = samplesOk[checkIn>1]
samples = np.concatenate((samples, samplesIn), axis=0)
samplesKeep = np.concatenate((samplesKeep, samplesOk), axis=0)
samplesKeep = samplesKeep[0:nSamples,:]
return samplesKeep
def distribFuncMickey(samplesInAAA):
#really slow but works
area = 1.172
centersIn = [ (0.5,0), (-0.5,0), (0.0,-0.5) ]
radiusesIn = [ 0.3, 0.3, 0.5 ]
centersOut = [ (0.0,-0.5) , (0.2, -0.3 ) , (-0.2, -0.3 ) ]
radiusesOut = [ 0.05, 0.1, 0.1]
#circles = { (0.5,0) : 0.3, (-0.5,0) : 0.3, (0.0,-0.5) : 0.5, (0.0, -0.5): 0.05 ,
# (0.2, -0.3 ) : 0.1, (-0.2, -0.3 ): 0.1}
ellipseCenters = [ 0, -0.75]
ellipseRad = [ 0.2, 0.1]
samplesOk = np.zeros((0,2))
samples = samplesInAAA.copy()
#now need to do rejection sampling
#algorithm will check if its in any of the circles
for center, rad in zip(centersIn, radiusesIn):
rSquared = (samples[:,0]- center[0])**2.0 + (samples[:,1] - center[1])**2.0
if len(rSquared < rad**2) > 0:
ptsok = samples[rSquared < rad**2]
#ptsok = samples(list(np.nonzero(rSquared < rad**2)))
samplesOk = np.concatenate((samplesOk, ptsok), axis=0)
if len(rSquared > rad**2) > 0:
samples = samples[rSquared > rad**2]
for center, rad in zip(centersOut, radiusesOut):
rSquared = (samplesOk[:,0]- center[0])**2.0 + (samplesOk[:,1] - center[1])**2.0
if len(rSquared) > 0:
samplesIn = samplesOk[rSquared < rad**2.0]
samples = np.concatenate((samples, samplesIn ), axis=0)
samplesOk = samplesOk[rSquared > rad**2]
#check ellipse
checkIn = (samplesOk[:,0] - ellipseCenters[0])**2.0/ellipseRad[0]**2 + \
(samplesOk[:,1] - ellipseCenters[1])**2.0/ellipseRad[1]**2
if len(checkIn) > 0:
samplesIn = samplesOk[checkIn<1]
samplesOk = samplesOk[checkIn>1]
samples = np.concatenate((samples, samplesIn), axis=0)
indGood = []
for ii in range(len(samplesInAAA)):
#bad sample
for jj in range(len(samplesOk)):
if np.linalg.norm(samplesInAAA[ii,:]-samplesOk[jj,:]) < 1e-15:
indGood.append(ii)
break
out = np.zeros((len(samplesInAAA)))
out[indGood] = 1.0/area
return out
def genSampleTriangle(size):
#gen samples 2DTriangle with slope y = -x
#size is number of samples
numPoints = size[0]
numDims = size[1]
samples = np.zeros(size)
for ii in range(numPoints):
notValid = 1
while notValid:
s = np.random.uniform(-1, 1, numDims)
if s[0] > -s[1]:
notValid = 0
samples[ii,:] = s
return samples
def genSampleDonut(size, radius):
# size is size of samples
# radius is inner radius
# donut with outer radius = 1
numPoints = size[0]
numDims = size[1]
samples = np.zeros(size)
    # radius = 0.7  # originally hard-coded, which ignored the radius argument
for ii in range(numPoints):
notValid = 1
while notValid:
s = np.random.uniform(-1, 1, numDims)
if np.sqrt(s[0]**2.0 + s[1]**2.0) > radius:
notValid = 0
samples[ii,:] = s
return samples
def onedGaussDistrib(x):
out = 1.0/(2.0*np.pi)**0.5 * np.exp(-0.5*x**2.0)
return out
def twodGaussDistrib(x):
#isotropic 2d gaussian
out = 1.0/(2.0*np.pi) * np.exp(-0.5*x[:,0]**2.0 -0.5*x[:,1]**2.0)
return out
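if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # draw a 1D Latin hypercube sample and a few points inside the unit disc,
    # then evaluate the corresponding uniform density on the disc.
    lhs = sampleLHS(5)
    pts = genSampleNDCircle((4, 2), 1.0)
    print(lhs)
    print(pts)
    print(distribFuncNDCircle(pts, 1.0))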
| goroda/GPEXP | gpExp/utilities.py | Python | gpl-2.0 | 8,224 | ["Gaussian"] | 9eb88a3f679b321bb038f444cef1669c2c3e21367ee0d83ce02a0f414b852362 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2012 Prof. Richard H. West (r.west@neu.edu),
# Prof. William H. Green (whgreen@mit.edu)
# and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os
import logging
import rmgpy.qm.mopac
import rmgpy.qm.gaussian
from rmgpy.data.thermo import ThermoLibrary
class QMSettings():
"""
A minimal class to store settings related to quantum mechanics calculations.
=================== ======================= ====================================
Attribute Type Description
=================== ======================= ====================================
`software` ``str`` Quantum chemical package name in common letters
`method` ``str`` Semi-empirical method
`fileStore` ``str`` The path to the QMfiles directory
`scratchDirectory` ``str`` The path to the scratch directory
    `onlyCyclics`             ``bool``                    ``True`` to run QM only on cyclic (ringed) species
`maxRadicalNumber` ``int`` Radicals larger than this are saturated before applying HBI
=================== ======================= ====================================
"""
def __init__(self,
software = None,
method = 'pm3',
fileStore = None,
scratchDirectory = None,
onlyCyclics = True,
maxRadicalNumber = 0,
):
self.software = software
self.method = method
if fileStore:
self.fileStore = os.path.join(fileStore, method)
else:
self.fileStore = None
if scratchDirectory:
self.scratchDirectory = os.path.join(scratchDirectory, method)
else:
self.scratchDirectory = None
self.onlyCyclics = onlyCyclics
self.maxRadicalNumber = maxRadicalNumber
if os.sys.platform == 'win32':
symmetryPath = os.path.join(rmgpy.getPath(),'..', 'bin', 'symmetry.exe')
# If symmetry is not installed in the bin folder, assume it is available on the path somewhere
if not os.path.exists(symmetryPath):
symmetryPath = 'symmetry.exe'
else:
symmetryPath = os.path.join(rmgpy.getPath(),'..', 'bin', 'symmetry')
if not os.path.exists(symmetryPath):
symmetryPath = 'symmetry'
self.symmetryPath = symmetryPath
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (QMSettings, (
self.software,
self.method,
self.fileStore,
self.scratchDirectory,
self.onlyCyclics,
self.maxRadicalNumber,
self.symmetryPath
)
)
def checkAllSet(self):
"""
Check that all the required settings are set.
"""
from types import BooleanType, IntType
assert self.fileStore
#assert self.scratchDirectory
assert self.software
assert self.method
assert self.onlyCyclics is not None # but it can be False
assert type(self.onlyCyclics) is BooleanType
assert self.maxRadicalNumber is not None # but it can be 0
assert type(self.maxRadicalNumber) is IntType
class QMCalculator():
"""
A Quantum Mechanics calculator object, to store settings.
The attributes are:
=================== ======================= ====================================
Attribute Type Description
=================== ======================= ====================================
`settings` :class:`QMSettings` Settings for QM calculations
`database` :class:`ThermoLibrary` Database containing QM calculations
=================== ======================= ====================================
"""
def __init__(self,
software = None,
method = 'pm3',
fileStore = None,
scratchDirectory = None,
onlyCyclics = True,
maxRadicalNumber = 0,
):
self.settings = QMSettings(software = software,
method = method,
fileStore = fileStore,
scratchDirectory = scratchDirectory,
onlyCyclics = onlyCyclics,
maxRadicalNumber = maxRadicalNumber,
)
self.database = ThermoLibrary(name='QM Thermo Library')
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (QMCalculator, (self.settings, self.database))
def setDefaultOutputDirectory(self, outputDirectory):
"""
        If the fileStore or scratchDirectory are not already set, put them under this output directory.
"""
if not self.settings.fileStore:
self.settings.fileStore = os.path.abspath(os.path.join(outputDirectory, 'QMfiles', self.settings.method))
logging.info("Setting the quantum mechanics fileStore to {0}".format(self.settings.fileStore))
if not self.settings.scratchDirectory:
self.settings.scratchDirectory = os.path.abspath(os.path.join(outputDirectory, 'QMscratch', self.settings.method))
logging.info("Setting the quantum mechanics scratchDirectory to {0}".format(self.settings.scratchDirectory))
def initialize(self):
"""
Do any startup tasks.
"""
self.checkReady()
def checkReady(self):
"""
Check that it's ready to run calculations.
"""
self.settings.checkAllSet()
self.checkPaths()
def checkPaths(self):
"""
Check the paths in the settings are OK. Make folders as necessary.
"""
self.settings.fileStore = os.path.expandvars(self.settings.fileStore) # to allow things like $HOME or $RMGpy
self.settings.scratchDirectory = os.path.expandvars(self.settings.scratchDirectory)
for path in [self.settings.fileStore, self.settings.scratchDirectory]:
if not os.path.exists(path):
logging.info("Creating directory %s for QM files."%os.path.abspath(path))
# This try/except should be redundant, but some networked file systems
# seem to be slow or buggy or respond strangely causing problems
# between checking the path exists and trying to create it.
try:
os.makedirs(path)
except OSError as e:
logging.warning("Error creating directory {0}: {1!r}".format(path, e))
logging.warning("Checking it already exists...")
assert os.path.exists(path), "Path {0} still doesn't exist?".format(path)
def getThermoData(self, molecule):
"""
Generate thermo data for the given :class:`Molecule` via a quantum mechanics calculation.
Ignores the settings onlyCyclics and maxRadicalNumber and does the calculation anyway if asked.
(I.e. the code that chooses whether to call this method should consider those settings).
"""
self.initialize()
if self.settings.software == 'mopac':
if self.settings.method == 'pm3':
qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM3(molecule, self.settings)
elif self.settings.method == 'pm6':
qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM6(molecule, self.settings)
elif self.settings.method == 'pm7':
qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM7(molecule, self.settings)
else:
raise Exception("Unknown QM method '{0}' for mopac".format(self.settings.method))
thermo0 = qm_molecule_calculator.generateThermoData()
elif self.settings.software == 'gaussian':
if self.settings.method == 'pm3':
qm_molecule_calculator = rmgpy.qm.gaussian.GaussianMolPM3(molecule, self.settings)
elif self.settings.method == 'pm6':
qm_molecule_calculator = rmgpy.qm.gaussian.GaussianMolPM6(molecule, self.settings)
else:
raise Exception("Unknown QM method '{0}' for gaussian".format(self.settings.method))
thermo0 = qm_molecule_calculator.generateThermoData()
else:
raise Exception("Unknown QM software '{0}'".format(self.settings.software))
return thermo0
def save(rmg):
# Save the QM thermo to a library if QM was turned on
if rmg.quantumMechanics:
logging.info('Saving the QM generated thermo to qmThermoLibrary.py ...')
rmg.quantumMechanics.database.save(os.path.join(rmg.outputDirectory,'qmThermoLibrary.py'))
class QMDatabaseWriter(object):
"""
This class listens to a RMG subject
and saves the thermochemistry of species computed via the
QMTPmethods.
A new instance of the class can be appended to a subject as follows:
rmg = ...
listener = QMDatabaseWriter()
rmg.attach(listener)
Whenever the subject calls the .notify() method, the
.update() method of the listener will be called.
To stop listening to the subject, the class can be detached
from its subject:
rmg.detach(listener)
"""
def __init__(self):
super(QMDatabaseWriter, self).__init__()
def update(self, rmg):
save(rmg)
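# A hedged usage sketch (not part of RMG-Py itself): wire up a QMCalculator and
# request thermo data. `molecule` is assumed to be an rmgpy.molecule.Molecule
# instance prepared elsewhere, and the output directory below is a placeholder.
def exampleQMThermo(molecule, outputDirectory='QMrun'):
    calculator = QMCalculator(software='mopac', method='pm3')
    # fills in fileStore/scratchDirectory under outputDirectory if they are unset
    calculator.setDefaultOutputDirectory(outputDirectory)
    # getThermoData() calls initialize() itself, which validates the settings and
    # creates the directories before running the MOPAC PM3 calculation
    return calculator.getThermoData(molecule)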
|
pierrelb/RMG-Py
|
rmgpy/qm/main.py
|
Python
|
mit
| 11,154
|
[
"Gaussian",
"MOPAC"
] |
cad6e1c7ea391babd8dcbf68002f8ca6cca07f1fdeea71a963ac3f5699065f77
|
import sys
import re
import subprocess as sp
# This script complements autoDefine.py.
#
# Its member functions determine what input to print and
# then print that input to the define.input file
#
# FOR THOSE WHO MODIFY/ADD FUNCTIONALITY:
#
# All functions in commandWriters.py must have at least the following arguments:
# botSpecs, entries, defInp
#
# Please only include extra variables if absolutely necessary, we want to avoid
# variable creep in the main function.
#
# When adding a new writing function, define both the defaults and key formats
# the same way previous functions defined them. If this is done a template
# input file may be generated by running the commands:
#
# sed -n -e '/^#KeyFormat:/,/^[^#]/ { /^[^#]/b; p }' commandWriters.py > templateOptions
# sed -i -e 's|^#KeyFormat: ||' -e 's|^#||' templateOptions
# echo '$end' >> templateOptions
#
# And similarly a list of the Default values may be generated by:
#
# sed -n -e '/^#Default/,/^#KeyFormat:/ { /^#KeyFormat:/b; p }' commandWriters.py > defaultList
#getLine() finds the line with key on it and returns everything
# until the next '$', replacing newlines with spaces.
#
# Input:
# opts - The list of specified options
# key - The marker for the section we want,
# should have format '$sectionName'
#
# Output:
# target - The section specified by key with newlines
# replaced by spaces
def getLine(opts, key):
thisIter = iter(opts)
target=None
if opts:
line=thisIter.next()
noEnd=True
else:
noEnd=False
while noEnd:
#Find the section specified by key
if line.split()[0] == key:
target=line.split(key)[1]
line=thisIter.next()
#Get continued lines
while '$' not in line:
target=target.strip('\n')
target=target+' '+line
line=thisIter.next()
#Push through until file end or key reached.
if line.split()[0] == '$end':
noEnd = False
elif line.split()[0] != key :
line=thisIter.next()
return target
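#A short hedged example (not part of the original script) of how getLine()
#walks an options list: continuation lines are folded in with spaces until the
#next '$' section is reached. The option strings below are illustrative only.
def exampleGetLine():
    opts = ['$title my job\n',
            'second line of title\n',
            '$charge 0\n',
            '$end\n']
    #returns ' my job second line of title\n' (the leading space is kept)
    return getLine(opts, '$title')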
#getEscapeChars() replaces the phrases used to represent escaped characters
# with those characters. Currently only needed/used for
# basis set definitions.
#
# Input:
# string - The string to be modified
#
# Output:
# string - The string with new substitutions
def getEscapeChars( string ):
#For now replace SAVEUNDERSCORE and SAVEPERCENT with what they
#would be in the options file. Need some way to specify
#_/% from directory though
escapeDict={ 'SAVESPACE' : ' ', 'SAVEEQUALS' : '=', \
'SAVEUNDERSCORE' : ' ', 'SAVEPERCENT' : '=' }
for phrase in escapeDict:
string = string.replace( phrase, escapeDict[phrase] )
return string
#title() writes title
#
#Default: ''
#
#KeyFormat: $title [title]
def title(botSpecs, entries, defInp):
key='$title'
# The three sources for options are
# in order:
# - The user specified/default options file
# - The options file in directory we are currently
# iterating over.
# - The options specified by the names of the supplied
# directories.
title=getLine(entries, key)
titleBot=getLine(botSpecs, key)
# precedence: title < titleBot < titleDir
if titleBot:
title = titleBot
if not title:
defInp.write('\n')
else:
title=title.strip()
defInp.write(title+'\n')
#readCoord() tells the name of the coordinate file
#
#Default: only $coord present/$coord omitted - file=coord
#
#KeyFormat: $coord file=[fileName]
def readCoord(botSpecs, entries, defInp):
key='$coord'
coord=getLine(entries, key)
coordBot=getLine(botSpecs, key)
if coordBot:
coord = coordBot
if coord:
if 'file=' in coord:
coord=coord.split('file=')[1]
coord = getEscapeChars( coord )
defInp.write('a '+coord)
else:
defInp.write('a coord\n')
else:
defInp.write('a coord\n')
################################################################################
# Begin Coordinate Menu Writers #
################################################################################
#assignSym() tells symmetry to use, groups must have d6h
#symmetry or a group deriving from some subset of D6h
#operations. (Is this correct?)
#'sym=auto' will attempt to automatically determine symmetry.
#
#Default: c1
#
#KeyFormat: $sym sym=[auto/character] eps=[float]
def assignSym(botSpecs, entries, defInp):
key='$sym'
sym=getLine(entries, key)
symBot=getLine(botSpecs, key)
if symBot:
sym = symBot
if sym:
if 'sym=' in sym:
group=sym.split('sym=')[1]
group=group.split()[0]
if 'eps=' in sym:
eps=sym.split('eps=')[1]
eps=eps.split()[0]
if group=='auto':
defInp.write('desy '+eps+'\n')
else:
defInp.write('sy '+group+' '+eps+'\n')
else:
if group=='auto':
defInp.write('desy\n')
else:
defInp.write('sy '+group+'\n')
#fix() handles specification of frozen internal coordinates.
#type takes values: stre,invr,bend,outp,tors,linc,linp
#See section 4.1.2 of the Turbomole manual to learn how atomic
#indices should be specified for each type.
#
#comp and ring type specifications not currently supported
#
#Default: $fix used/omitted - restraint not used
#
#KeyFormat: $fix [type]=[1-4,6]
def fix(botSpecs, entries, defInp):
typeDict = {'stre' : 2,
'invr' : 2,
'bend' : 3,
'outp' : 4,
'tors' : 4,
'linc' : 4,
'linp' : 4 }
key='$fix'
fix=getLine(entries, key)
fixBot=getLine(botSpecs, key)
if fixBot:
if fix:
print 'Warning: frozen coordinates specified in top' \
+ ' level options file will be overridden'
fix = fixBot
if fix:
defInp.write('idef\n')
for f in fix.split():
ftype, atoms = f.split('=')
defInp.write('f '+ftype)
atoms=atoms.split(',')
count = 0
for a in atoms:
if '-' in a:
                    rnge = range(int(a.split('-')[0]), int(a.split('-')[1])+1)
a = ''
for at in rnge:
a += str(at) + ' '
count += 1
count -= 1
count += 1
defInp.write(' ' + a)
defInp.write('\n')
try:
if typeDict[ftype] != count:
print 'Error: wrong number of indices specified in - '+f
return
except KeyError:
print 'Error: '+ftype+' is not an allowed constraint type'
return
defInp.write('\n\n\n\n')
#detInternals() tells whether internal redundant coordinates
#should be used. It returns True if they are.
#
#Default: 'on'
#
#KeyFormat: $internal [on/off]
def detInternals(botSpecs, entries, defInp):
key='$internal'
internal=getLine(entries, key)
internalBot=getLine(botSpecs, key)
if internalBot:
internal = internalBot
if internal:
internal=internal.strip()
        #NOTE: both branches below currently write 'ired'; an 'off' value is
        #not honored here and redundant internals are always requested
        if internal == 'on':
            defInp.write('ired\n')
            return True
        else:
            defInp.write('ired\n')
            return True
return False
#assignFrags() used to define fragments. It looks like the define section
#for this has some issues so if $frag is used, you must define the
#the fragment number for all atoms. Also, the symmetry assignments for
#individual fragments don't seem to work in define, so auto will always
#be used.
#
#Defaults: '$frag' omitted - No fragments
# '$frag' used - Must define atoms in fragments, no default
# - charge=0 for all fragments
# - auto symmetry determination for each fragment
#
#KeyFormat: $frag frag1=[1-4,6] [ frag2=[1-4,6] frag3=[1-4,6] ]
# chrg1=[int] chrg2=[int] chrg3=[int]
# sym=[auto/none/character] *currently disabled*
def assignFrags(botSpecs, entries, defInp):
key='$frag'
frag=getLine(entries, key)
fragBot=getLine(botSpecs, key)
if fragBot:
frag = fragBot
if frag:
defInp.write('frag\non\nq\nq\n')
if 'frag1=' in frag:
frag1=frag.split('frag1=')[1]
frag1=frag1.split()[0]
ats=frag1.split(',')
for at in ats:
defInp.write('x\n')
at.strip()
rnge=at.split('-')
if len(rnge) == 2:
defInp.write(rnge[0]+'\n'+rnge[1]+'\n1\n')
elif len(rnge) == 1:
defInp.write(rnge[0]+'\n'+rnge[0]+'\n1\n')
else:
print 'Error: invalid entry in $frag'
sys.exit()
if 'frag2=' in frag:
frag2=frag.split('frag2=')[1]
frag2=frag2.split()[0]
ats=frag2.split(',')
for at in ats:
defInp.write('x\n')
at.strip()
rnge=at.split('-')
if len(rnge) == 2:
defInp.write(rnge[0]+'\n'+rnge[1]+'\n2\n')
elif len(rnge) == 1:
defInp.write(rnge[0]+'\n'+rnge[0]+'\n2\n')
else:
print 'Error: invalid entry in $frag'
sys.exit()
if 'frag3=' in frag:
frag3=frag.split('frag3=')[1]
frag3=frag3.split()[0]
ats=frag3.split(',')
for at in ats:
defInp.write('x\n')
at.strip()
rnge=at.split('-')
if len(rnge) == 2:
defInp.write(rnge[0]+'\n'+rnge[1]+'\n3\n')
elif len(rnge) == 1:
defInp.write(rnge[0]+'\n'+rnge[0]+'\n3\n')
else:
print 'Error: invalid entry in $frag'
sys.exit()
# if 'sym=' in frag:
# sym=frag.split('sym=')[1]
# sym=sym.split()[0]
# if sym == '':
if 'chrg' in frag:
defInp.write('cha\n')
if 'chrg1=' in frag:
chrg1=frag.split('chrg1=')[1]
chrg1=chrg1.split()[0]
defInp.write(chrg1+'\n')
if 'chrg2=' in frag:
chrg2=frag.split('chrg2=')[1]
chrg2=chrg2.split()[0]
defInp.write(chrg2+'\n')
if 'chrg3=' in frag:
chrg3=frag.split('chrg3=')[1]
            chrg3=chrg3.split()[0]
defInp.write(chrg3+'\n')
defInp.write('\n\n\n')
################################################################################
# Begin Basis Set Menu Writers #
################################################################################
#defBasis() tells what basis set to use for each atom type. Note that
#the basis set names are case-sensitive. More options may be implemented
#in the future.
#
#Default: '$basis' omitted - all def2-SV(P)
# No basis specified for an atom - def2-SV(P)
# No atoms specified for a basis - all atoms
# atoms specified with no basis - don't do this
#
#KeyFormat: $basis [def2-QZVP]=[all/1-4,6/"c"] ...
def defBasis(botSpecs, entries, defInp):
key='$basis'
basis=getLine(entries, key)
basisBot=getLine(botSpecs, key)
if basisBot:
basis = basisBot
#Assigning the basis set information
if basis:
basis = basis.split()
for entry in basis:
defInp.write('b\n')
if '=' in entry:
entry = entry.split('=')
if entry[1] != '':
entry[0] = getEscapeChars(entry[0])
entry[1] = getEscapeChars(entry[1])
defInp.write(entry[1]+' '+entry[0]+'\n')
else:
print 'Error: empty basis set assignment for '+entry[0]
sys.exit()
else:
entry = getEscapeChars(entry)
defInp.write('all '+entry+'\n')
################################################################################
# Begin Molecular Orbital Calculation Writers #
################################################################################
#useHcore() turns hcore guess on or off.
#
#Default: off
#
#KeyFormat: $hcore [on/off]
def useHcore(botSpecs, entries, defInp):
key='$hcore'
hcore=getLine(entries, key)
hcoreBot=getLine(botSpecs, key)
if hcoreBot:
hcore = hcoreBot
# future input depends on hcore
if hcore:
hcore = hcore.strip()
if hcore == 'on':
defInp.write('hcore\n')
return True
return False
#eht() manages Hueckel guess options. EHT is always used
#unless hcore is used instead, it can't be turned off.
#
#Default: global_constant=1.70 mod_Wolfsberg-Helmholz=off
#
#KeyFormat: $eht global=[float] modWH=[on/off]
# (atom index pair format -->)[int,int]=[float]
def eht(botSpecs, entries, defInp):
key='$eht'
eht=getLine(entries, key)
ehtBot=getLine(botSpecs, key)
if ehtBot:
eht = ehtBot
defInp.write('eht\n')
modAtomEht=False
#If key present
if eht:
eht = eht.split()
#Should be 2-D
eht = [ entry.split('=') for entry in eht ]
#Just in case only $eht specified
if len(eht) == 0:
defInp.write('y\n')
return
else:
defInp.write('n\n')
#Set global Hueckel constant first
if 'global' in [item for sub in eht for item in sub]:
for entry in eht:
if len(entry) != 2:
print 'Error: improper eht option supplied'
sys.exit()
if entry[0] == 'global':
defInp.write('y\n'+entry[1]+'\n')
else:
defInp.write('n\n')
#followed by deciding whether to use the
            #modified Wolfsberg-Helmholz formula.
            if 'modWH' in [item for sub in eht for item in sub]:
for entry in eht:
if len(entry) != 2:
print 'Error: improper eht option supplied'
sys.exit()
if entry[0] == 'modWH' and entry[1] == 'on':
defInp.write('y\n')
else:
defInp.write('n\n')
else:
defInp.write('n\n')
#Then check for any entry with a comma in the first
#range. It's assumed these are atomic indices.
for entry in eht:
if len(entry) != 2:
print 'Error: improper eht option supplied'
sys.exit()
if ',' in entry[0]:
modAtomEht=True
defInp.write('y\n')
indices=entry[0].split(',')
defInp.write(indices[0]+' '+indices[1]+'\n'+entry[1]+'\n')
#Needed for when it asks about printing out
#Hueckel coeffs. Default is no.
if modAtomEht:
defInp.write('\n')
#Will either skip the Hueckel modifying section or exit
#the atom pair section depending on choices.
defInp.write('\n')
#molCharge() define the molecular charge
#
#Default: 0
#
#KeyFormat: $charge [int]
def molCharge(botSpecs, entries, defInp):
key='$charge'
charge=getLine(entries, key)
chargeBot=getLine(botSpecs, key)
if chargeBot:
charge = chargeBot
if charge:
charge = charge.strip()
defInp.write(charge)
defInp.write('\n')
#setOcc() handles options involving electron occupation.
#
#Default: lowest energy occupation
#
#KeyFormat: $occ [int]
def setOcc(botSpecs, entries, defInp):
key='$occ'
occ=getLine(entries, key)
occBot=getLine(botSpecs, key)
if occBot:
occ = occBot
defInp.write('\n')
################################################################################
# Begin Method Definition Writers #
################################################################################
#dft() handles specifications within the density
#functional theory menu.
#
#Default: $dft omitted - DFT not used
# $dft used - functional b-p
# gridsize m3
#
#KeyFormat: $dft func=[label] grid=[label]
def dft(botSpecs, entries, defInp):
key='$dft'
dft=getLine(entries, key)
dftBot=getLine(botSpecs, key)
if dftBot:
dft = dftBot
if dft:
defInp.write('dft\non\n')
if 'func=' in dft:
func=dft.split('func=')[1]
func=func.split()[0]
defInp.write('func '+func+'\n')
if 'grid=' in dft:
grid=dft.split('grid=')[1]
grid=grid.split()[0]
defInp.write('grid '+grid+'\n')
defInp.write('\n')
#ri() handles specifications within the resolution
#of identity menu.
#
#Default: $ri omitted - ri not used
# $ri used - ricore 500 Mb
# - jbas matches basis type
# - file named auxbasis holds jbas
# - when specifying jbas, same defaults
# as specifying basis used
#
#KeyFormat: $ri mem=[int in Mb] file=[name]
# jbas=[def2-QZVP]=[all/1-4,6/"c"] ...
def ri(botSpecs, entries, defInp):
key='$ri'
ri=getLine(entries, key)
riBot=getLine(botSpecs, key)
if riBot:
ri = riBot
if ri:
defInp.write('ri\non\n')
#specify $ricore
if 'mem=' in ri:
mem=ri.split('mem=')[1]
mem=mem.split()[0]
defInp.write('m '+mem+'\n')
# modify file name for $jbas
if 'file=' in ri:
fil=ri.split('file=')[1]
fil=fil.split()[0]
defInp.write('f '+fil+'\n')
# Set the $jbas basis type
if 'jbas=' in ri:
defInp.write('jbas\n')
jbas = ri.split('jbas=')[1:]
for entry in jbas:
entry = entry.split()[0]
if '=' in entry:
ats = entry.split('=')[1]
entry = entry.split('=')[0]
else:
ats = 'all'
entry = getEscapeChars( entry )
defInp.write('b\n'+ats+' '+entry+'\n')
defInp.write('*\n')
defInp.write('\n')
#cc() handles specifications within the cc menu.
#There are quite a few options, may want to break
#it up more. For now only those options relevant to
#rirpa will be implemented. I kind of think cc should
#be more than one menu in define itself.
#
#A note on freeze=: pick one from num/energy/list exactly
#as written. Options can't be combined.
#input for num is number of frozen core orbitals
# energy is the orbital energy cutoff value in Hartree
# list is orbital specification via [1-4,6] style format
#
#Default: $cc used/omitted - maxcor=500mb
# denconv=1d-7
# freeze with no options - energy=-3H
# cbas with no options - cbas matches basis type
#
#KeyFormat: $cc freeze=[num/energy/list]=[input] mem=[int in Mb]
# cbas=[def2-QZVP]=[all/1-4,6/"c"] ...
# denconv=[float]
def cc(botSpecs, entries, defInp):
key='$cc'
cc=getLine(entries, key)
ccBot=getLine(botSpecs, key)
if ccBot:
cc = ccBot
if cc:
defInp.write('cc\n')
# Handles the freeze sub-menu
if 'freeze' in cc:
defInp.write('freeze\n')
if 'freeze=' in cc:
freeze = cc.split('freeze=')[1]
freeze = freeze.split()[0]
if 'num=' in freeze:
num = freeze.split('num=')[1]
defInp.write('core '+num+'\n')
elif 'energy=' in freeze:
energy = freeze.split('energy=')[1]
defInp.write('fp '+energy+'\n')
elif 'list=' in freeze:
lis = freeze.split('list=')[1]
defInp.write('core 0\nf '+lis+'\n')
defInp.write('*\n')
# Handles the cbas sub-menu
if 'cbas' in cc:
defInp.write('cbas\n')
if 'cfail' in cc:
defInp.write('\n')
if 'cbas=' in cc:
cbas = cc.split('cbas=')[1:]
if not cbas[0].split()[0] == 'default':
for entry in cbas:
entry = entry.split()[0]
if '=' in entry:
ats = entry.split('=')[1]
bas = entry.split('=')[0]
else:
ats = 'all'
bas = entry
bas = getEscapeChars( bas )
defInp.write('b\n'+ats+' '+bas+'\n')
defInp.write('*\n')
# Sets $maxcor
if 'mem=' in cc:
mem = cc.split('mem=')[1]
mem = mem.split()[0]
defInp.write('memory '+mem+'\n')
# Sets $denconv
if 'denconv=' in cc:
dens = cc.split('denconv=')[1]
dens = dens.split()[0]
defInp.write('denconv '+dens+'\n')
defInp.write('*\n')
#rirpa() handles rirpa related specifications.
#
#Default: $rirpa omitted - rirpa not used
# $rirpa used - npoints 60, all other settings off
#
#KeyFormat: $rirpa npoints=[int] rpagrad nohxx rpaprof
def rirpa(botSpecs, entries, defInp):
key='$rirpa'
rirpa=getLine(entries, key)
rirpaBot=getLine(botSpecs, key)
if rirpaBot:
rirpa = rirpaBot
if rirpa:
defInp.write('rirpa\n')
if 'npoints=' in rirpa:
npoint = rirpa.split('npoints=')[1]
npoint = npoint.split()[0]
defInp.write('npoints '+npoint+'\n')
if 'rpagrad' in rirpa:
defInp.write('rpagrad\n')
if 'nohxx' in rirpa:
defInp.write('nohxx\n')
if 'rpaprof' in rirpa:
defInp.write('rpaprof\n')
defInp.write('\n')
#scf() handles scf related specifications.
#
#Default: convergence criteria : conv=6 --> 1d-6
# integral threshold values: thize=1d-5, thime=5
# 2e- integral storage : ints=None
# scf iteration limit : iter=60
#
#
#KeyFormat: $scf conv=[int] iter=[int]
def scf(botSpecs, entries, defInp):
key='$scf'
scf=getLine(entries, key)
scfBot=getLine(botSpecs, key)
if scfBot:
scf = scfBot
if scf:
defInp.write('scf\n')
if 'conv=' in scf:
conv=scf.split('conv=')[1]
conv=conv.split()[0]
defInp.write('conv\n'+conv+'\n')
if 'iter=' in scf:
iters=scf.split('iter=')[1]
iters=iters.split()[0]
defInp.write('iter\n'+iters+'\n')
defInp.write('\n')
################################################################################
# Begin Cosmoprep Writer #
################################################################################
#cosmo() writes cosmoprep input, currently only
#epsilon and refractive index may be specified.
#
#Default: '$cosmo' omitted - don't use cosmo
# $cosmo used - epsilon=infinity
# - nppa= 1082
# - nspa= 92
# - disex= 10.0000
# - rsolv= 1.30
# - routf= 0.85
# - cavity closed
# - ampran= 0.1D-04
# - phsran= 0.0
# - refind= 1.3
#
#KeyFormat: $cosmo epsilon=[epsilon] refind=[refind]
def cosmo(botSpecs, entries):
key='$cosmo'
cosmo=getLine(entries, key)
cosmoBot=getLine(botSpecs, key)
if cosmoBot:
cosmo = cosmoBot
if cosmo:
cosInp = open('cosmoprep.input','w')
if 'epsilon=' in cosmo:
eps=cosmo.split('epsilon=')[1]
eps=eps.split()[0]
cosInp.write(eps+'\n')
if 'refind=' in cosmo:
rind=cosmo.split('refind=')[1]
            rind=rind.split()[0]
cosInp.write(rind+'\n')
cosInp.write('\n'*11)
cosInp.write('r all o\n*\n\n\n')
################################################################################
# Begin Post Processing #
################################################################################
#dsp() handles dispersion related specifications. Define
# could not print these options until V7-2 so we add
# them directly to the control file.
#
#Default: Not used
#
#KeyFormat: $dsp [d3,d3bj,d2]
def dsp(botSpecs, entries, keep_going):
key='$dsp'
dsp=getLine(entries, key)
dspBot=getLine(botSpecs, key)
if dspBot:
dsp = dspBot
if dsp:
#New disp keys go here
disp_dict = {'d3' : '$disp3',
'd3bj' : '$disp3 bj',
'd2' : '$disp'}
dsp = dsp.strip()
try:
#Consider looking for a solution not using Popen
p=sp.Popen("sed -i 's/$end/" + disp_dict[dsp]
+ "\\n$end/' control",shell=True)
p.wait()
except KeyError:
print 'Error: ' + dsp + ' is not an allowed entry for $dsp.'
print 'Allowed entries and the keyword they add to control are:'
for k in disp_dict.keys():
print k + ' adds ' + disp_dict[k]
if not keep_going:
sys.exit()
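#Hedged sketch (not used by dsp() above): the same control-file edit done in
#pure Python instead of sed/Popen. Assumes a writable file named 'control'
#containing a '$end' line, as produced by define; the function name is
#illustrative only.
def addDispKeyword(keyword, controlFile='control'):
    with open(controlFile, 'r') as f:
        lines = f.readlines()
    with open(controlFile, 'w') as f:
        for line in lines:
            #insert the dispersion keyword immediately before $end
            if line.startswith('$end'):
                f.write(keyword + '\n')
            f.write(line)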
|
magee256/TurbomoleScripts
|
SetUp/autoDefine/commandWriters.py
|
Python
|
mit
| 25,318
|
[
"TURBOMOLE"
] |
fd6a237503e8c3607bd55313a92fdbf47bef4356d07570364195cb235c3ad257
|
#!/usr/bin/env python
import rospkg
import cv2
import datetime
import numpy as np
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
# class ,descr00,descr01,descr02,descr03,descr04,descr05,descr06,descr07,
# descr08,descr09,descr10,descr11,descr12,descr13,descr14,descr15,descr16,
# descr17,descr18,descr19,descr20,descr21,descr22,descr23,descr24,descr25,
# descr26,descr27,descr28,descr29,descr30,descr31,angle ,classid,octave ,
# x ,y ,respons,size ,imageid
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('stopsign')
IMAGE_RATE = 11 # hz
BULK_DATA_FILE = '%s/data/003_manual_labels/all.csv' % (pkg_path,)
start_image_id = 0
end_image_id = 2189
IMAGE_BASE_STRING = '%s/data/002_original_images/%s' % (pkg_path, 'frame%04d.jpg')
SAVE_IMAGE_FILE = '%s/data/008_sgd_opt/%s' % (pkg_path, 'iters_%03dtest_huber.png')
descriptors = []
for i in range(32):
descriptors.append('descr%02d' % (i,))
klass = ['class'.ljust(7)]
def get_image(image_id):
filename = IMAGE_BASE_STRING % (image_id,)
return cv2.imread(filename, cv2.IMREAD_COLOR)
def load_data(seed=None):
df = pd.read_csv(BULK_DATA_FILE, header=0)
# mutate data back from stored form
df['class '] = df['class '].apply(lambda cls: cls / 1000.0)
df['angle '] = df['angle '].apply(lambda ang: ang / 1000.0)
df['respons'] = df['respons'].apply(lambda res: res / 100000000.0)
# split into class, features
X = df[descriptors]
y = df[klass]
print('X.describe()')
print(X.describe())
print('y.describe()')
print(y.describe())
# use mask to split into test, train
if seed is not None:
np.random.seed(seed)
msk = np.random.rand(len(df)) < 0.8
train_X = X[msk].as_matrix()
test_X = X[~msk].as_matrix()
train_y = y[msk].as_matrix().ravel()
test_y = y[~msk].as_matrix().ravel()
return train_X, train_y, test_X, test_y
def subsample_data(X, y, ratio=0.5, seed=None):
size = 1100
rus = RandomUnderSampler(
ratio={
0: int(size * ratio),
1: int(size * (1 - ratio)),
},
random_state=seed)
return rus.fit_sample(X, y)
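def example_balanced_subsample(X, y):
    # Hedged illustration (not called by the sweep below): with ratio=0.5 and
    # size=1100, subsample_data asks RandomUnderSampler for 550 samples of each
    # class. Newer imblearn releases name the `ratio` argument `sampling_strategy`.
    return subsample_data(X, y, ratio=0.5, seed=0)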
def increment_index_list(index, max_list):
index[-1] += 1
if index[-1] >= max_list[-1]:
for i in range(len(index) - 1, 0, -1):
if index[i] >= max_list[i]:
index[i] = 0
index[i-1] += 1
return index
def make_all_combinations(dict_of_arglists):
input_args = list(dict_of_arglists.keys())
max_list = []
for input_key in input_args:
max_list.append(len(dict_of_arglists[input_key]))
index_list = [0] * len(input_args)
count = 1
for val in max_list:
count *= val
for _ in range(count):
input_vals = []
for index, input_key in enumerate(input_args):
input_vals.append(dict_of_arglists[input_key][index_list[index]])
combined = zip(input_args, input_vals)
d = dict(combined)
yield d
index_list = increment_index_list(index_list, max_list)
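def example_grid():
    # Hedged illustration (not used by the sweep below): make_all_combinations
    # expands a spec dict into every combination of its value lists; this spec
    # yields {'loss': 'hinge', 'penalty': 'l1'} and {'loss': 'log', 'penalty': 'l1'}.
    spec = {'loss': ['hinge', 'log'], 'penalty': ['l1']}
    return list(make_all_combinations(spec))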
if __name__ == '__main__':
### Begin the whole process ###
'''
Things to work on:
Vary up the dataset:
- Classify the total image instead of just one keypoint
- Learn based on the classification of all of the keypoints in the
image and their location
- With classification of image in hand, classify image with random
perturbations
            - rotate the image by a gaussian amount
            - add gaussian noise to the whole image
            - add gaussian brightness to the whole image
            - add gaussian darkness
- red
- green
- blue
- shift left, right, up, down (should be no biggy, can skip because
keypoints will just shift)
- image flip left/right, up/down?
- scaling image (zoom center)
- affine transform
- perspective transform
Once I've played with varying the dataset, I should either find a set of
images that confirm the stopsign is pretty robust or an expanded training
set to train with. From there, optimize KNN,
'''
# load data from csv, split into training and test sets
print('begin loading data')
train_X, train_y, test_X, test_y = load_data(12345)
Klassifiers = [
SGDClassifier,
]
max_iters = list(range(1,201,5))
sgd_spec = {
'loss': ['modified_huber',], # ['hinge', 'log', 'modified_huber',],
'penalty': ['l1',], # ['l2', 'l1', 'elasticnet',],
'max_iter': max_iters,
}
Klassifier_configs = []
Klassifier_configs.extend(make_all_combinations(sgd_spec))
num_tests = 8
for i in range(8,9):
num_tests = 2**i
for index, Klassifier in enumerate(Klassifiers):
acc = []
pre = []
rec = []
tim = []
for config_setup in Klassifier_configs:
print('current config: %s' % (config_setup,))
acc_accum = 0
pre_accum = 0
rec_accum = 0
tim_accum = 0
for seed in range(0, num_tests):
# print('round %4d/%4d' % (seed+1, num_tests))
train_X, train_y = subsample_data(train_X, train_y, 0.5, seed*num_tests+9105)
classifier = Klassifier(**config_setup)
classifier.fit(train_X, train_y)
X_splits = np.array_split(test_X, 437)
y_splits = np.array_split(test_y, 437)
split_version = zip(X_splits, y_splits)
for test_X_sub, test_y_sub in split_version:
# print('begin pred')
stime = datetime.datetime.now()
y_pred = classifier.predict(test_X_sub)
etime = datetime.datetime.now()
# print('end pred')
# print('begin scoring')
acc_accum += accuracy_score(y_true=test_y_sub, y_pred=y_pred)
pre_accum += precision_score(y_true=test_y_sub, y_pred=y_pred)
rec_accum += recall_score(y_true=test_y_sub, y_pred=y_pred)
tim_accum += (etime - stime).total_seconds()
# print('end scoring')
acc.append(acc_accum / (num_tests*437))
pre.append(pre_accum / (num_tests*437))
rec.append(rec_accum / (num_tests*437))
tim.append(tim_accum / (num_tests*437))
print('a: %.4f (percent correctly classified)' % (acc_accum / (num_tests*437),))
print('p: %.4f (percent of correct positives)' % (pre_accum / (num_tests*437),))
print('r: %.4f (percent of positive results found)' % (rec_accum / (num_tests*437),))
print('t: %.6f sec' % (tim_accum / (num_tests*437),))
print(Klassifier)
print('Averaged over %d tests' % (num_tests,))
# better accuracy summary
print('a: %.4f (avg percent correctly classified)' % (sum(acc)/len(acc),))
print('Top Accuracies')
print('90 percent of max accuracy cutoff')
sorted_ = sorted(enumerate(acc), key=lambda x: -x[1])
top_acc = sorted_[0][1]
sorted_ = filter(lambda x: x[1] >= top_acc * 0.9, sorted_)
for acc_index, accuracy in sorted_[:15]:
print('% 4.2f | %s' % (accuracy * 100, Klassifier_configs[acc_index]))
print('p: %.4f (avg percent of correct positives)' % (sum(pre)/len(pre),))
print('r: %.4f (avg percent of positive results found)' % (sum(rec)/len(rec),))
print('t: %.6f avg sec' % (sum(tim) / len(tim)))
print('Top Prediction Latencies')
print('Top 10')
sorted_ = sorted(enumerate(tim), key=lambda x: x[1])
for tim_index, pred_latency in sorted_[:10]:
print('%.6f | %s' % (pred_latency, Klassifier_configs[tim_index]))
print(num_tests)
print(max_iters)
print(acc)
plt.plot(max_iters, acc, label=str(num_tests))
plt.axis([min(max_iters) - 1, max(max_iters) + 1, 0, 1.0])
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.savefig(SAVE_IMAGE_FILE % (num_tests,))
# plt.show()
plt.clf()
print('did it save?')
|
buckbaskin/stopsign
|
src/v1/sgd_optimize.py
|
Python
|
mit
| 9,137
|
[
"Gaussian"
] |
3338a06b23906085bfa4069f9eec53d947c99056aa02a719ef31f35793ec466a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests/Examples for sloth.inst.rowland (pantograph movements part)
"""
__author__ = "Mauro Rovezzi"
__email__ = "mauro.rovezzi@gmail.com"
__license__ = "BSD license <http://opensource.org/licenses/BSD-3-Clause>"
import os, sys
import math
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib import rc, cm, gridspec
from matplotlib.ticker import MultipleLocator
### SLOTH ###
try:
from sloth import __version__ as sloth_version
from sloth.inst.rowland import cs_h, acenx, det_pos_rotated, RcHoriz, RcVert
from sloth.utils.genericutils import colorstr
from sloth.utils.bragg import d_cubic, d_hexagonal
except:
raise ImportError('sloth is not installed (required version >= 0.2.0)')
### USEFUL GLOBAL VARIABLES ###
SI_ALAT = 5.431065 # Ang at 25C
dSi111 = d_cubic(SI_ALAT, (1,1,1))
dSi220 = d_cubic(SI_ALAT, (2,2,0))
### TESTS FOR THE PANTOGRAPH PROTOTYPE (A.K.A. FRICTION PROTOTYPE) ###
def testFrictionPrototype(Rm, theta0, d=dSi111):
"""implemented in get_bender_pos and get_bender_mot methods in sloth 0.2.0
Pantograph parameters (sloth 0.2.0)
-----------------------------------
aW : float, 0.
crystal analyser optical width
aWext : float, 0.
crystal analyser extended width (NOTE: this width is
used in self.get_chi2, that is, the width to get two
adjacent analysers touching)
rSext : float, 0.
sagittal radius offset where aWext is referred to,
that is, aWext condition is given for Rs+rSext
aL : float, 0.
distance of analyser center from the chi rotation
(affects => Chi, SagOff)
bender : tuple of floats, (0., 0., 0.) corresponds to
(length_arm0_mm, length_arm1_mm, angle_between_arms_deg)
    actuator : tuple of floats, (0., 0.) corresponds to
(axoff_actuator_mm, length_actuator_arm_mm)
here:
bender = (bender_arm0, bender_arm, bender_angle)
actuator = (act_axoff, act_dist)
ref 3D CAD: ``RowlandSketchPrototype-v1512``
"""
# get pivot points positions
t = RcHoriz(Rm=Rm, theta0=theta0, d=d,\
aW=25., aWext=32, rSext=10., aL=97.,\
showInfos=False)
bender_arm0 = 40. #arm to anchor p5 to actuator
bender_arm = 60. #arm bender to the trapezoids, mm
bender_angle = 100. #deg
act_axoff = 269. #aXoff actuator
act_dist = 135. #distance from actuator, mm
_R = t.Rs + t.rSext
c5 = t.get_chi2(5., Rs=_R)
c4 = t.get_chi2(4., Rs=_R)
c3 = t.get_chi2(3., Rs=_R)
dchi = c5-c3
# pivot points of 5, 4 and 3
s5 = t.get_axoff(c5)
p5 = t.get_sag_off(s5, retAll=True)
s4 = t.get_axoff(c4)
p4 = t.get_sag_off(s4, retAll=True)
s3 = t.get_axoff(c3)
p3 = t.get_sag_off(s3, retAll=True)
# get position of point c where the bender arm is linked, on the radius of p4
_R2 = t.Rs + t.aL
rdch = math.radians(dchi/2.)
h = _R2 * (1 - math.cos(rdch))
chalf = _R2 * math.sin(rdch)
ra = math.acos(chalf/bender_arm)
dc = bender_arm * math.sin(ra) - h
sc = t.get_axoff(c4, Rs=t.Rs+dc)
pc = t.get_sag_off(sc, retAll=True)
rb = math.acos( (p5[1]-pc[1])/bender_arm )
#print(p5[1]-pc[1])
#print('Angle last pivot point and bender = {0:.6f}'.format(180-100-math.degrees(rb)))
rc = math.pi - math.radians(bender_angle) - rb
# B point, where the actuator is anchored
#[math.degrees(rchi), aXoff, SagOff, math.degrees(rchi0), aXoff0, SagOff0]
pb = []
pb.append(0.) #no chi pos
pb.append( p5[1] + bender_arm0 * math.cos(rc) )
pb.append( p5[2] - bender_arm0 * math.sin(rc) )
#print(pb)
# up to here everything seems correct
rd = math.asin((act_axoff - pb[1]) / act_dist)
#print(math.degrees(rd))
mot_sagoff = act_dist * math.cos(rd) + pb[2]
print('Actuator position = {0} (test)'.format(mot_sagoff))
# WORKS!!!!!
return t
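def _segment_geometry(R, dchi):
    """Hedged helper sketch (not used above): the circular-segment quantities
    employed repeatedly in these tests for an arc of full angle `dchi` (deg) on
    a circle of radius `R`: returns (half-chord, sagitta) =
    (R*sin(dchi/2), R*(1 - cos(dchi/2)))."""
    rdch = math.radians(dchi/2.)
    return R * math.sin(rdch), R * (1 - math.cos(rdch))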
def testFrictionPrototypeInMethod(Rm, theta0, d=dSi111,\
aW=25., aWext=32, rSext=10., aL=97.,\
bender=(40., 60., 100.), actuator=(269., 135.),\
bender_version=0,
showInfos=True):
"""as testFrictionPrototype implemented in sloth 0.2.0"""
if not (sloth_version == '0.2.0'):
print('This test works only with sloth 0.2.0')
return 0
t = RcHoriz(Rm=Rm, theta0=theta0, d=d,\
aW=aW, aWext=aWext, rSext=rSext, aL=aL,\
bender=bender, actuator=actuator,\
bender_version=bender_version,\
showInfos=showInfos)
mot_sagoff = t.get_bender_mot(t.get_bender_pos(aN=5))
print('INFO: Actuator position = {0} (in method)'.format(mot_sagoff))
return t
### TESTS FOR THE PANTOGRAPH VERSION 2017 ###
def testPantograph2017(Rm, theta0, d=dSi111,\
aW=25., aWext=32, rSext=10., aL=97.,\
bender_version=1, bender=(40., 60., 28.), actuator=(300, 120),\
showInfos=True):
"""implemented in get_bender_pos and get_bender_mot methods in sloth 0.2.1
Pantograph parameters (sloth 0.2.1)
-----------------------------------
aW : float, 0.
crystal analyser optical width
aWext : float, 0.
crystal analyser extended width (NOTE: this width is
used in self.get_chi2, that is, the width to get two
adjacent analysers touching)
rSext : float, 0.
sagittal radius offset where aWext is referred to,
that is, aWext condition is given for Rs+rSext
aL : float, 0.
distance of analyser center from the chi rotation
(affects => Chi, SagOff)
bender_version : string, None
defines the bender tuple keyword argument
see -> self.get_bender_pos()
bender : tuple of floats, (0., 0., 0.) corresponds to
if (bender_version is None) or (bender_version == 0):
(length_arm0_mm, length_arm1_mm, angle_between_arms_deg)
if (bender_version == 1):
(length_arm0_mm, length_arm1_mm, length_anchor_actuator_mm)
    actuator : tuple of floats, (0., 0.) corresponds to
(axoff_actuator_mm, length_actuator_arm_mm)
here:
bender = (bender_arm0, bender_arm, bender_angle)
actuator = (act_axoff, act_dist)
ref 3D CAD: ``RowlandSketchPrototype-v1706``
"""
#init rc + bender
rc = RcHoriz(Rm=Rm, theta0=theta0, d=d,\
aW=aW, aWext=aWext, rSext=rSext, aL=aL,\
bender_version=bender_version,\
bender=bender, actuator=actuator,
showInfos=showInfos)
### START TEST ###
aN = 5
#map last 3 pivot points positions
_c2 = [rc.get_chi2(_n) for _n in range( int(aN-2), int(aN+1) )] #CHIs
dchi = _c2[2]-_c2[0]
_p = [rc.get_sag_off(rc.get_axoff(_cn), retAll=True) for _cn in _c2] #SagOffs
#find the angle between the last pivot point _p[-1] and the bender point (B)
#we use for this the position of the end point of bender[1] (C)
_R = rc.Rs + rc.aL
rdch = math.radians(dchi/2.)
h = _R * (1 - math.cos(rdch)) #chord pivots 0 and -2
chalf = _R * math.sin(rdch) #from circular segment formula
ra = math.acos(chalf/rc.bender[1])
dc = rc.bender[1] * math.sin(ra) - h #aperture of the pantograph
#find coordinates of point B (pb) of the bender (anchor point with actuator[1])
adc = math.asin((dc/2)/rc.bender[0]) #angle opposite to dc -> OK
pdc = rc.bender[0]*math.cos(adc)
pb_ang = math.atan(pdc/(rc.Rs+rc.aL+dc/2)) #angle between last analyzer and point B
pb_h = _R * (1 - math.cos(pb_ang)) #chord
pb_chalf = _R * math.sin(pb_ang) #from circular segment formula
pb_ra = math.acos(pb_chalf/rc.bender[0])
pb_dc = rc.bender[0] * math.sin(pb_ra) - pb_h
pb_chi = pb_ang + rc.get_chi2(aN, inDeg=False) #radians
pb_rs = rc.Rs + rc.aL + pb_dc
pb_axoff = pb_rs * math.sin(pb_chi)
pb_sagoff = _R - (pb_rs * math.cos(pb_chi))
### END TEST ###
### AS IMPLEMENTED ###
pb_in = rc.get_bender_pos(aN=5)
print("Test bender point:")
print("(here) at ({0:.5f}, {1:.5f})".format(pb_axoff, pb_sagoff))
print("(in method) at ({0:.5f}, {1:.5f})".format(pb_in[0], pb_in[1]))
act_mot_pos = rc.get_bender_mot(pb_in)
print("Actuator motor position (in local sagittal reference): {0:.5f}".format(act_mot_pos))
return rc
if __name__ == "__main__":
#plt.close('all')
#t1 = testFrictionPrototype(240., 65.)
#t = testFrictionPrototypeInMethod(250., 35.)
t = testPantograph2017(240., 35.)
|
maurov/xraysloth
|
examples/rowland_pantograph_tests.py
|
Python
|
bsd-3-clause
| 9,042
|
[
"CRYSTAL"
] |
03922a2645ffcbd0e6d00f4e9f61a47728479a830c28a973419ffcea608cfe47
|
import datetime
from django.core.management.base import NoArgsCommand
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Q, F
from askbot.models import User, Post, PostRevision, Thread
from askbot.models import Activity, EmailFeedSetting
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings as django_settings
from askbot.conf import settings as askbot_settings
from django.utils.datastructures import SortedDict
from django.contrib.contenttypes.models import ContentType
from askbot import const
from askbot import mail
from askbot.utils.slug import slugify
DEBUG_THIS_COMMAND = False
def get_all_origin_posts(mentions):
origin_posts = set()
for mention in mentions:
post = mention.content_object
origin_posts.add(post.get_origin_post())
return list(origin_posts)
#todo: refactor this as class
def extend_question_list(
src, dst, cutoff_time = None,
limit=False, add_mention=False,
add_comment = False
):
"""src is a query set with questions
or None
dst - is an ordered dictionary
update reporting cutoff time for each question
to the latest value to be more permissive about updates
"""
if src is None:#is not QuerySet
return #will not do anything if subscription of this type is not used
if limit and len(dst.keys()) >= askbot_settings.MAX_ALERTS_PER_EMAIL:
return
if cutoff_time is None:
if hasattr(src, 'cutoff_time'):
cutoff_time = src.cutoff_time
else:
raise ValueError('cutoff_time is a mandatory parameter')
for q in src:
if q in dst:
meta_data = dst[q]
else:
meta_data = {'cutoff_time': cutoff_time}
dst[q] = meta_data
if cutoff_time > meta_data['cutoff_time']:
#the latest cutoff time wins for a given question
#if the question falls into several subscription groups
#this makes mailer more eager in sending email
meta_data['cutoff_time'] = cutoff_time
if add_mention:
if 'mentions' in meta_data:
meta_data['mentions'] += 1
else:
meta_data['mentions'] = 1
if add_comment:
if 'comments' in meta_data:
meta_data['comments'] += 1
else:
meta_data['comments'] = 1
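#A hedged illustration (not called by the command): questions from any iterable
#can be merged into the ordered dict; `questions` is assumed to be a list or
#QuerySet of question Posts, and the helper name is illustrative only.
def _example_extend_question_list(questions):
    q_list = SortedDict()
    extend_question_list(
        questions,
        q_list,
        cutoff_time = datetime.datetime.now() - datetime.timedelta(days=1)
    )
    return q_list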
def format_action_count(string, number, output):
if number > 0:
output.append(_(string) % {'num':number})
class Command(NoArgsCommand):
def handle_noargs(self, **options):
if askbot_settings.ENABLE_EMAIL_ALERTS:
try:
try:
self.send_email_alerts()
except Exception, e:
print e
finally:
connection.close()
def get_updated_questions_for_user(self, user):
"""
        retrieve relevant question updates for the user
according to their subscriptions and recorded question
views
"""
user_feeds = EmailFeedSetting.objects.filter(
subscriber=user
).exclude(
frequency__in=('n', 'i')
)
should_proceed = False
for feed in user_feeds:
if feed.should_send_now() == True:
should_proceed = True
break
#shortcircuit - if there is no ripe feed to work on for this user
if should_proceed == False:
return {}
#these are placeholders for separate query sets per question group
#there are four groups - one for each EmailFeedSetting.feed_type
#and each group has subtypes A and B
#that's because of the strange thing commented below
#see note on Q and F objects marked with todo tag
q_sel_A = None
q_sel_B = None
q_ask_A = None
q_ask_B = None
q_ans_A = None
q_ans_B = None
q_all_A = None
q_all_B = None
#base question query set for this user
#basic things - not deleted, not closed, not too old
#not last edited by the same user
base_qs = Post.objects.get_questions().exclude(
thread__last_activity_by=user
).exclude(
thread__last_activity_at__lt=user.date_joined#exclude old stuff
).exclude(
deleted=True
).exclude(
thread__closed=True
).order_by('-thread__last_activity_at')
if askbot_settings.ENABLE_CONTENT_MODERATION:
base_qs = base_qs.filter(approved = True)
#todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
# Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
#returns way more questions than you might think it should
#so because of that I've created separate query sets Q_set2 and Q_set3
#plus two separate queries run faster!
        #build the two query sets:
#questions that are not seen by the user at all
not_seen_qs = base_qs.filter(~Q(viewed__who=user))
#questions that were seen, but before last modification
seen_before_last_mod_qs = base_qs.filter(
Q(
viewed__who=user,
viewed__when__lt=F('thread__last_activity_at')
)
)
#shorten variables for convenience
Q_set_A = not_seen_qs
Q_set_B = seen_before_last_mod_qs
for feed in user_feeds:
if feed.feed_type == 'm_and_c':
#alerts on mentions and comments are processed separately
#because comments to questions do not trigger change of last_updated
#this may be changed in the future though, see
#http://askbot.org/en/question/96/
continue
#each group of updates represented by the corresponding
            #query set has its own cutoff time
#that cutoff time is computed for each user individually
#and stored as a parameter "cutoff_time"
#we won't send email for a given question if an email has been
#sent after that cutoff_time
if feed.should_send_now():
if DEBUG_THIS_COMMAND == False:
feed.mark_reported_now()
cutoff_time = feed.get_previous_report_cutoff_time()
if feed.feed_type == 'q_sel':
q_sel_A = Q_set_A.filter(thread__followed_by=user)
q_sel_A.cutoff_time = cutoff_time #store cutoff time per query set
q_sel_B = Q_set_B.filter(thread__followed_by=user)
q_sel_B.cutoff_time = cutoff_time #store cutoff time per query set
elif feed.feed_type == 'q_ask':
q_ask_A = Q_set_A.filter(author=user)
q_ask_A.cutoff_time = cutoff_time
q_ask_B = Q_set_B.filter(author=user)
q_ask_B.cutoff_time = cutoff_time
elif feed.feed_type == 'q_ans':
q_ans_A = Q_set_A.filter(thread__posts__author=user, thread__posts__post_type='answer')
q_ans_A = q_ans_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
q_ans_A.cutoff_time = cutoff_time
q_ans_B = Q_set_B.filter(thread__posts__author=user, thread__posts__post_type='answer')
q_ans_B = q_ans_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
q_ans_B.cutoff_time = cutoff_time
elif feed.feed_type == 'q_all':
q_all_A = user.get_tag_filtered_questions(Q_set_A)
q_all_B = user.get_tag_filtered_questions(Q_set_B)
q_all_A = q_all_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
q_all_B = q_all_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
q_all_A.cutoff_time = cutoff_time
q_all_B.cutoff_time = cutoff_time
#build ordered list questions for the email report
q_list = SortedDict()
#todo: refactor q_list into a separate class?
extend_question_list(q_sel_A, q_list)
extend_question_list(q_sel_B, q_list)
#build list of comment and mention responses here
#it is separate because posts are not marked as changed
#when people add comments
#mention responses could be collected in the loop above, but
#it is inconvenient, because feed_type m_and_c bundles the two
#also we collect metadata for these here
try:
feed = user_feeds.get(feed_type='m_and_c')
if feed.should_send_now():
cutoff_time = feed.get_previous_report_cutoff_time()
comments = Post.objects.get_comments().filter(
added_at__lt = cutoff_time,
).exclude(
author = user
)
q_commented = list()
for c in comments:
post = c.parent
if post.author != user:
continue
                    #skip if post was seen by the user after
#the comment posting time
q_commented.append(post.get_origin_post())
extend_question_list(
q_commented,
q_list,
cutoff_time = cutoff_time,
add_comment = True
)
mentions = Activity.objects.get_mentions(
mentioned_at__lt = cutoff_time,
mentioned_whom = user
)
#print 'have %d mentions' % len(mentions)
#MM = Activity.objects.filter(activity_type = const.TYPE_ACTIVITY_MENTION)
#print 'have %d total mentions' % len(MM)
#for m in MM:
# print m
mention_posts = get_all_origin_posts(mentions)
q_mentions_id = [q.id for q in mention_posts]
q_mentions_A = Q_set_A.filter(id__in = q_mentions_id)
q_mentions_A.cutoff_time = cutoff_time
extend_question_list(q_mentions_A, q_list, add_mention=True)
q_mentions_B = Q_set_B.filter(id__in = q_mentions_id)
q_mentions_B.cutoff_time = cutoff_time
extend_question_list(q_mentions_B, q_list, add_mention=True)
except EmailFeedSetting.DoesNotExist:
pass
if user.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
extend_question_list(q_all_A, q_list)
extend_question_list(q_all_B, q_list)
extend_question_list(q_ask_A, q_list, limit=True)
extend_question_list(q_ask_B, q_list, limit=True)
extend_question_list(q_ans_A, q_list, limit=True)
extend_question_list(q_ans_B, q_list, limit=True)
if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
extend_question_list(q_all_A, q_list, limit=True)
extend_question_list(q_all_B, q_list, limit=True)
ctype = ContentType.objects.get_for_model(Post)
EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT
#up to this point we still don't know if emails about
#collected questions were sent recently
#the next loop examines activity record and decides
#for each question, whether it needs to be included or not
#into the report
for q, meta_data in q_list.items():
#this loop edits meta_data for each question
#so that user will receive counts on new edits new answers, etc
#and marks questions that need to be skipped
#because an email about them was sent recently enough
#also it keeps a record of latest email activity per question per user
try:
#todo: is it possible to use content_object here, instead of
#content type and object_id pair?
update_info = Activity.objects.get(
user=user,
content_type=ctype,
object_id=q.id,
activity_type=EMAIL_UPDATE_ACTIVITY
)
emailed_at = update_info.active_at
except Activity.DoesNotExist:
update_info = Activity(
user=user,
content_object=q,
activity_type=EMAIL_UPDATE_ACTIVITY
)
emailed_at = datetime.datetime(1970, 1, 1)#long time ago
except Activity.MultipleObjectsReturned:
raise Exception(
'server error - multiple question email activities '
'found per user-question pair'
)
cutoff_time = meta_data['cutoff_time']#cutoff time for the question
#skip question if we need to wait longer because
#the delay before the next email has not yet elapsed
#or if last email was sent after the most recent modification
if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
meta_data['skip'] = True
continue
#collect info on all sorts of news that happened after
#the most recent emailing to the user about this question
q_rev = q.revisions.filter(revised_at__gt=emailed_at)
q_rev = q_rev.exclude(author=user)
#now update all sorts of metadata per question
meta_data['q_rev'] = len(q_rev)
if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
meta_data['q_rev'] = 0
meta_data['new_q'] = True
else:
meta_data['new_q'] = False
new_ans = Post.objects.get_answers(user).filter(
thread=q.thread,
added_at__gt=emailed_at,
deleted=False,
)
new_ans = new_ans.exclude(author=user)
meta_data['new_ans'] = len(new_ans)
ans_ids = Post.objects.get_answers(user).filter(
thread=q.thread,
added_at__gt=emailed_at,
deleted=False,
).values_list(
'id', flat = True
)
ans_rev = PostRevision.objects.filter(post__id__in = ans_ids)
ans_rev = ans_rev.exclude(author=user).distinct()
meta_data['ans_rev'] = len(ans_rev)
comments = meta_data.get('comments', 0)
mentions = meta_data.get('mentions', 0)
#print meta_data
#finally skip question if there are no news indeed
if len(q_rev) + len(new_ans) + len(ans_rev) + comments + mentions == 0:
meta_data['skip'] = True
#print 'skipping'
else:
meta_data['skip'] = False
#print 'not skipping'
update_info.active_at = datetime.datetime.now()
if DEBUG_THIS_COMMAND == False:
update_info.save() #save question email update activity
#q_list is actually an ordered dictionary
#print 'user %s gets %d' % (user.username, len(q_list.keys()))
#todo: sort question list by update time
return q_list
def send_email_alerts(self):
#does not change the database, only sends the email
#todo: move this to template
for user in User.objects.all():
user.add_missing_askbot_subscriptions()
#todo: q_list is a dictionary, not a list
q_list = self.get_updated_questions_for_user(user)
if len(q_list.keys()) == 0:
continue
num_q = 0
for question, meta_data in q_list.items():
if meta_data['skip']:
del q_list[question]
else:
num_q += 1
if num_q > 0:
url_prefix = askbot_settings.APP_URL
threads = Thread.objects.filter(id__in=[qq.thread_id for qq in q_list.keys()])
tag_summary = Thread.objects.get_tag_summary_from_threads(threads)
question_count = len(q_list.keys())
subject_line = ungettext(
'%(question_count)d updated question about %(topics)s',
'%(question_count)d updated questions about %(topics)s',
question_count
) % {
'question_count': question_count,
'topics': tag_summary
}
#todo: send this to special log
#print 'have %d updated questions for %s' % (num_q, user.username)
text = ungettext(
'<p>Dear %(name)s,</p><p>The following question has been updated '
                    'on %(sitename)s:</p>',
'<p>Dear %(name)s,</p><p>The following %(num)d questions have been '
'updated on %(sitename)s:</p>',
num_q
) % {
'num':num_q,
'name':user.username,
'sitename': askbot_settings.APP_SHORT_NAME
}
text += '<ul>'
items_added = 0
items_unreported = 0
for q, meta_data in q_list.items():
act_list = []
if meta_data['skip']:
continue
if items_added >= askbot_settings.MAX_ALERTS_PER_EMAIL:
items_unreported = num_q - items_added #may be inaccurate actually, but it's ok
else:
items_added += 1
if meta_data['new_q']:
act_list.append(_('new question'))
format_action_count('%(num)d rev', meta_data['q_rev'],act_list)
format_action_count('%(num)d ans', meta_data['new_ans'],act_list)
format_action_count('%(num)d ans rev',meta_data['ans_rev'],act_list)
act_token = ', '.join(act_list)
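                    # e.g. act_token might read 'new question, 2 rev, 1 ans' (illustrative values)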
text += '<li><a href="%s?sort=latest">%s</a> <font color="#777777">(%s)</font></li>' \
% (url_prefix + q.get_absolute_url(), q.thread.title, act_token)
text += '</ul>'
text += '<p></p>'
#if len(q_list.keys()) >= askbot_settings.MAX_ALERTS_PER_EMAIL:
# text += _('There may be more questions updated since '
# 'you have logged in last time as this list is '
                #              'abridged for your convenience. Please visit '
# 'the askbot and see what\'s new!<br>'
# )
link = url_prefix + reverse(
'user_subscriptions',
kwargs = {
'id': user.id,
'slug': slugify(user.username)
}
)
text += _(
'<p>Please remember that you can always <a '
'href="%(email_settings_link)s">adjust</a> frequency of the email updates or '
                    'turn them off entirely.<br/>If you believe that this message was sent in '
                    'error, please email the forum administrator at %(admin_email)s.</'
'p><p>Sincerely,</p><p>Your friendly %(sitename)s server.</p>'
) % {
'email_settings_link': link,
'admin_email': django_settings.ADMINS[0][1],
'sitename': askbot_settings.APP_SHORT_NAME
}
if DEBUG_THIS_COMMAND == True:
recipient_email = django_settings.ADMINS[0][1]
else:
recipient_email = user.email
mail.send_mail(
subject_line = subject_line,
body_text = text,
recipient_list = [recipient_email]
)
|
erichegt/askbot-devel
|
askbot/management/commands/send_email_alerts.py
|
Python
|
gpl-3.0
| 21,850
|
[
"VisIt"
] |
b3e29ab0c2f73128870d5fb28551b852e3e81d18ab0af5f6dcbab4bcd1fa3eac
|
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Client, Dependent, Visit
class ClientSummaryView(TemplateView):
template_name = "client_summary.html"
class ClientListView(LoginRequiredMixin, TemplateView):
template_name = "client_list.html"
def get_context_data(self, **kwargs):
context = super(ClientListView, self).get_context_data(**kwargs)
clients = []
for client in Client.objects.all():
clients.append({
"id": client.id,
"name": client.truncated_name(24),
"url": "/clients/{}/".format(client.id),
"regnum": client.id_number,
"notes": client.truncated_notes(24),
})
context['clients'] = clients
return context
class ClientDetailView(LoginRequiredMixin, DetailView):
template_name = "client_detail.html"
model = Client
|
tomp/food_pantry
|
clients/views.py
|
Python
|
mit
| 1,071
|
[
"VisIt"
] |
01390ca403d3c99fd33bb13d813261f95dd2b5b36c4835bfb3ee7c3d90eefc90
|
# Made by Mr. - Version 0.3 by kmarty and DrLecter
# Shadow Weapon Coupons contributed by BiTi for the Official L2J Datapack Project
# Visit http://forum.l2jdp.com for more details
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "406_PathToElvenKnight"
SORIUS_LETTER1 = 1202
KLUTO_BOX = 1203
ELVEN_KNIGHT_BROOCH = 1204
TOPAZ_PIECE = 1205
EMERALD_PIECE = 1206
KLUTO_MEMO = 1276
#messages
default="<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
player = st.getPlayer()
if event == "30327-05.htm" :
if player.getClassId().getId() != 0x12 :
if player.getClassId().getId() == 0x13 :
htmltext = "30327-02a.htm"
else:
htmltext = "30327-02.htm"
st.exitQuest(1)
else:
if player.getLevel()<19 :
htmltext = "30327-03.htm"
st.exitQuest(1)
else:
if st.getQuestItemsCount(ELVEN_KNIGHT_BROOCH) :
htmltext = "30327-04.htm"
elif event == "30327-06.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "30317-02.htm" :
if st.getInt("cond") == 3 :
st.takeItems(SORIUS_LETTER1,-1)
if st.getQuestItemsCount(KLUTO_MEMO) == 0 :
st.giveItems(KLUTO_MEMO,1)
st.set("cond","4")
else :
htmltext = default
else :
htmltext = default
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30327 and id != STARTED : return htmltext
if id == CREATED :
st.set("cond","0")
cond=0
else :
cond=st.getInt("cond")
if npcId == 30327 :
if cond == 0 :
htmltext = "30327-01.htm"
elif cond == 1 :
if st.getQuestItemsCount(TOPAZ_PIECE)==0 :
htmltext = "30327-07.htm"
else:
htmltext = "30327-08.htm"
elif cond == 2 :
if st.getQuestItemsCount(SORIUS_LETTER1) == 0 :
st.giveItems(SORIUS_LETTER1,1)
st.set("cond","3")
htmltext = "30327-09.htm"
elif cond in [3, 4, 5] :
htmltext = "30327-11.htm"
elif cond == 6 :
st.takeItems(KLUTO_BOX,-1)
st.set("cond","0")
st.setState(COMPLETED)
st.playSound("ItemSound.quest_finish")
if st.getQuestItemsCount(ELVEN_KNIGHT_BROOCH) == 0 :
st.giveItems(ELVEN_KNIGHT_BROOCH,1)
htmltext = "30327-10.htm"
elif npcId == 30317 :
if cond == 3 :
htmltext = "30317-01.htm"
elif cond == 4 :
if st.getQuestItemsCount(EMERALD_PIECE)==0 :
htmltext = "30317-03.htm"
else:
htmltext = "30317-04.htm"
elif cond == 5 :
st.takeItems(EMERALD_PIECE,-1)
st.takeItems(TOPAZ_PIECE,-1)
if st.getQuestItemsCount(KLUTO_BOX) == 0 :
st.giveItems(KLUTO_BOX,1)
st.takeItems(KLUTO_MEMO,-1)
st.set("cond","6")
htmltext = "30317-05.htm"
elif cond == 6 :
htmltext = "30317-06.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
if npcId != 20782 :
if st.getInt("cond")==1 and st.getQuestItemsCount(TOPAZ_PIECE)<20 and st.getRandom(100)<70 :
st.giveItems(TOPAZ_PIECE,1)
if st.getQuestItemsCount(TOPAZ_PIECE) == 20 :
st.playSound("ItemSound.quest_middle")
st.set("cond","2")
else:
st.playSound("ItemSound.quest_itemget")
else :
if st.getInt("cond")==4 and st.getQuestItemsCount(EMERALD_PIECE)<20 and st.getRandom(100)<50 :
st.giveItems(EMERALD_PIECE,1)
if st.getQuestItemsCount(EMERALD_PIECE) == 20 :
st.playSound("ItemSound.quest_middle")
st.set("cond","5")
else:
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(406,qn,"Path To Elven Knight")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30327)
QUEST.addTalkId(30327)
QUEST.addTalkId(30317)
QUEST.addKillId(20035)
QUEST.addKillId(20042)
QUEST.addKillId(20045)
QUEST.addKillId(20051)
QUEST.addKillId(20054)
QUEST.addKillId(20060)
QUEST.addKillId(20782)
STARTED.addQuestDrop(30327,SORIUS_LETTER1,1)
STARTED.addQuestDrop(20782,EMERALD_PIECE,1)
STARTED.addQuestDrop(20054,TOPAZ_PIECE,1)
STARTED.addQuestDrop(30317,KLUTO_MEMO,1)
STARTED.addQuestDrop(30317,KLUTO_BOX,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/406_PathToElvenKnight/__init__.py
|
Python
|
gpl-3.0
| 5,385
|
[
"VisIt"
] |
11c1cd1042aff3e0860ba8f954937684714521e808c3839f4cdb3272a7356cd3
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
from textwrap import dedent
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1_library = gapic.ruby_library(
'speech', 'v1',
artman_output_name='google-cloud-ruby/google-cloud-speech'
)
s.copy(v1_library / 'acceptance')
s.copy(v1_library / 'lib/google/cloud/speech/v1.rb')
s.copy(v1_library / 'lib/google/cloud/speech/v1')
s.copy(v1_library / 'test/google/cloud/speech/v1')
s.copy(v1_library / 'lib/google/cloud/speech.rb')
s.copy(v1_library / 'README.md')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-speech.gemspec', merge=ruby.merge_gemspec)
# Copy common templates
templates = gcp.CommonTemplates().ruby_library()
s.copy(templates)
v1p1beta1_library = gapic.ruby_library(
'speech', 'v1p1beta1',
artman_output_name='google-cloud-ruby/google-cloud-speech'
)
s.copy(v1p1beta1_library / 'acceptance')
s.copy(v1p1beta1_library / 'lib/google/cloud/speech/v1p1beta1.rb')
s.copy(v1p1beta1_library / 'lib/google/cloud/speech/v1p1beta1')
s.copy(v1p1beta1_library / 'test/google/cloud/speech/v1p1beta1')
# PERMANENT: Install partial gapics
s.replace(
'lib/google/cloud/speech/v1.rb',
'require "google/cloud/speech/v1/speech_client"',
'require "google/cloud/speech/v1/speech_client"\nrequire "google/cloud/speech/v1/helpers"')
s.replace(
'lib/google/cloud/speech/v1p1beta1.rb',
'require "google/cloud/speech/v1p1beta1/speech_client"',
'require "google/cloud/speech/v1p1beta1/speech_client"\nrequire "google/cloud/speech/v1p1beta1/helpers"')
# PERMANENT: Remove methods replaced by partial gapics
ruby.delete_method(
[
'lib/google/cloud/speech/v1/speech_client.rb',
'lib/google/cloud/speech/v1p1beta1/speech_client.rb'
],
'streaming_recognize')
# PERMANENT: Remove streaming test from generated tests
s.replace(
[
'test/google/cloud/speech/v1/speech_client_test.rb',
'test/google/cloud/speech/v1p1beta1/speech_client_test.rb'
],
f'\\n(\\s+)describe \'streaming_recognize\' do\\n+(\\1\\s\\s[^\\n]+\\n+)*\\1end\\n',
'\n')
# PERMANENT: Add migration guide to docs
s.replace(
'lib/google/cloud/speech.rb',
'# ### Preview',
dedent("""\
# ### Migration Guide
#
# The 0.30.0 release introduced breaking changes relative to the previous
# release, 0.29.0. For more details and instructions to migrate your code,
# please visit the [migration
# guide](https://cloud.google.com/speech-to-text/docs/ruby-client-migration).
#
# ### Preview"""))
# PERMANENT: Add migration guide to readme
s.replace(
'README.md',
'### Preview\n',
dedent("""\
### Migration Guide
The 0.30.0 release introduced breaking changes relative to the previous release,
0.29.0. For more details and instructions to migrate your code, please visit the
[migration
guide](https://cloud.google.com/speech-to-text/docs/ruby-client-migration).
### Preview\n"""))
# PERMANENT: Add post-install message
s.replace(
'google-cloud-speech.gemspec',
'gem.platform(\s+)= Gem::Platform::RUBY',
dedent("""\
gem.post_install_message =
"The 0.30.0 release introduced breaking changes relative to the "\\
"previous release, 0.29.0. For more details and instructions to migrate "\\
"your code, please visit the migration guide: "\\
"https://cloud.google.com/speech-to-text/docs/ruby-client-migration."
gem.platform\\1= Gem::Platform::RUBY"""))
# Support for service_address
s.replace(
[
'lib/google/cloud/speech.rb',
'lib/google/cloud/speech/v*.rb',
'lib/google/cloud/speech/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
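# Illustrative effect of the replacement above: each matching client docstring gains
# "@param service_address [String]" and "@param service_port [Integer]" entries right
# before its existing "@param exception_transformer" line, preserving indentation.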
s.replace(
[
'lib/google/cloud/speech/v*.rb',
'lib/google/cloud/speech/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/speech/v*.rb',
'lib/google/cloud/speech/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/speech/v*/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/speech/v*/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
s.replace(
'google-cloud-speech.gemspec',
'\n gem\\.add_dependency "google-gax", "~> 1\\.[\\d\\.]+"\n',
'\n gem.add_dependency "google-gax", "~> 1.7"\n')
# https://github.com/googleapis/gapic-generator/issues/2122
s.replace(
[
'lib/google/cloud/speech.rb',
'lib/google/cloud/speech/v1.rb',
'lib/google/cloud/speech/v1p1beta1.rb'
],
'gs://gapic-toolkit/hello.flac',
'gs://bucket-name/hello.flac')
# https://github.com/googleapis/gapic-generator/issues/2232
s.replace(
[
'lib/google/cloud/speech/v1/speech_client.rb',
'lib/google/cloud/speech/v1p1beta1/speech_client.rb'
],
'\n\n(\\s+)class OperationsClient < Google::Longrunning::OperationsClient',
'\n\n\\1# @private\n\\1class OperationsClient < Google::Longrunning::OperationsClient')
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/speech/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
s.replace(
'google-cloud-speech.gemspec',
'"README.md", "LICENSE"',
'"README.md", "AUTHENTICATION.md", "LICENSE"'
)
s.replace(
'.yardopts',
'README.md\n',
'README.md\nAUTHENTICATION.md\nLICENSE\n'
)
# https://github.com/googleapis/gapic-generator/issues/2393
s.replace(
'google-cloud-speech.gemspec',
'gem.add_development_dependency "rubocop".*$',
'gem.add_development_dependency "rubocop", "~> 0.64.0"'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'google-cloud-speech.gemspec',
'\nGem::Specification.new do',
'require File.expand_path("../lib/google/cloud/speech/version", __FILE__)\n\nGem::Specification.new do'
)
s.replace(
'google-cloud-speech.gemspec',
'(gem.version\s+=\s+).\d+.\d+.\d.*$',
'\\1Google::Cloud::Speech::VERSION'
)
for version in ['v1', 'v1p1beta1']:
s.replace(
f'lib/google/cloud/speech/{version}/*_client.rb',
f'(require \".*credentials\"\n)\n',
f'\\1require "google/cloud/speech/version"\n\n'
)
s.replace(
f'lib/google/cloud/speech/{version}/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::Speech::VERSION'
)
# Fix links for devsite migration
for file in ['lib/**/*.rb', '*.md']:
s.replace(
file,
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'*.md',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-speech/latest/file.AUTHENTICATION.html'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-speech/latest/file.AUTHENTICATION.html'
)
s.replace(
'README.md',
'github.io/google-cloud-ruby/#/docs/google-cloud-speech/latest/.*$',
'dev/ruby/google-cloud-speech/latest'
)
|
blowmage/gcloud-ruby
|
google-cloud-speech/synth.py
|
Python
|
apache-2.0
| 9,603
|
[
"VisIt"
] |
777a72e5352e79b68d4b4b171d96f25fbaf3fa27ad42dfe45e6d5831f55000b5
|
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
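# Illustrative usage: makedirs('a/b/c') creates 'a' and then 'a/b' as needed before
# making 'a/b/c'; an intermediate directory that already exists is tolerated, but an
# existing leaf still raises OSError with errno EEXIST.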
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
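# Illustrative usage: execle('/bin/ls', 'ls', '-l', {'PATH': '/bin'}) replaces the
# current process, passing ('ls', '-l') as the argument list and the trailing
# mapping as the environment.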
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
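    # Illustrative usage (Unix): rc = spawnv(P_WAIT, '/bin/echo', ['echo', 'hi'])
    # blocks until the child exits and returns its exit status (0 on success);
    # with P_NOWAIT the call instead returns the child's pid immediately.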
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
|
zwChan/VATEC
|
~/eb-virt/Lib/os.py
|
Python
|
apache-2.0
| 26,652
|
[
"VisIt"
] |
c8e67622df3b0e6455038a21fb0e25f1ed1071ee841fc745a54d4be2b7d88391
|
""" This helper looks in the /Operations section of the CS, considering its specific nature:
the /Operations section is designed in a way that each configuration can be specific to a Setup, while maintaining a default.
So, for example, given the following /Operations section::
Operations/
        Defaults/
someSection/
someOption = someValue
aSecondOption = aSecondValue
Production/
someSection/
someOption = someValueInProduction
aSecondOption = aSecondValueInProduction
Certification/
someSection/
someOption = someValueInCertification
The following calls would give different results based on the setup::
Operations().getValue('someSection/someOption')
- someValueInProduction if we are in 'Production' setup
- someValueInCertification if we are in 'Certification' setup
Operations().getValue('someSection/aSecondOption')
- aSecondValueInProduction if we are in 'Production' setup
- aSecondValue if we are in 'Certification' setup <- looking in Default since there's no Certification/someSection/aSecondOption
"""
import thread
import types
import os
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities import CFG, LockRing
from DIRAC.ConfigurationSystem.Client.Helpers import Registry, CSGlobals
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
class Operations( object ):
""" Operations class
The /Operations CFG section is maintained in a cache by an Operations object
"""
__cache = {}
__cacheVersion = 0
__cacheLock = LockRing.LockRing().getLock()
def __init__( self, vo = False, group = False, setup = False ):
""" c'tor
Setting some defaults
"""
self.__uVO = vo
self.__uGroup = group
self.__uSetup = setup
self.__vo = False
self.__setup = False
self.__discoverSettings()
def __discoverSettings( self ):
#Set the VO
globalVO = CSGlobals.getVO()
if globalVO:
self.__vo = globalVO
elif self.__uVO:
self.__vo = self.__uVO
elif self.__uGroup:
self.__vo = Registry.getVOForGroup( self.__uGroup )
if not self.__vo:
self.__vo = False
else:
result = getVOfromProxyGroup()
if result['OK']:
self.__vo = result['Value']
#Set the setup
self.__setup = False
if self.__uSetup:
self.__setup = self.__uSetup
else:
self.__setup = CSGlobals.getSetup()
def __getCache( self ):
Operations.__cacheLock.acquire()
try:
currentVersion = gConfigurationData.getVersion()
if currentVersion != Operations.__cacheVersion:
Operations.__cache = {}
Operations.__cacheVersion = currentVersion
cacheKey = ( self.__vo, self.__setup )
if cacheKey in Operations.__cache:
return Operations.__cache[ cacheKey ]
mergedCFG = CFG.CFG()
for path in self.__getSearchPaths():
pathCFG = gConfigurationData.mergedCFG[ path ]
if pathCFG:
mergedCFG = mergedCFG.mergeWith( pathCFG )
Operations.__cache[ cacheKey ] = mergedCFG
return Operations.__cache[ cacheKey ]
finally:
try:
Operations.__cacheLock.release()
except thread.error:
pass
def setVO( self, vo ):
""" False to auto detect VO
"""
self.__uVO = vo
self.__discoverSettings()
def setGroup( self, group ):
""" False to auto detect VO
"""
self.__uGroup = group
self.__discoverSettings()
def setSetup( self, setup ):
""" False to auto detect
"""
self.__uSetup = setup
self.__discoverSettings()
def __getSearchPaths( self ):
paths = [ "/Operations/Defaults", "/Operations/%s" % self.__setup ]
if not self.__vo:
globalVO = CSGlobals.getVO()
if not globalVO:
return paths
self.__vo = CSGlobals.getVO()
paths.append( "/Operations/%s/Defaults" % self.__vo )
paths.append( "/Operations/%s/%s" % ( self.__vo, self.__setup ) )
return paths
def getValue( self, optionPath, defaultValue = None ):
return self.__getCache().getOption( optionPath, defaultValue )
def __getCFG( self, sectionPath ):
cacheCFG = self.__getCache()
section = cacheCFG.getRecursive( sectionPath )
if not section:
return S_ERROR( "%s in Operations does not exist" % sectionPath )
sectionCFG = section[ 'value' ]
if type( sectionCFG ) in ( types.StringType, types.UnicodeType ):
return S_ERROR( "%s in Operations is not a section" % sectionPath )
return S_OK( sectionCFG )
def getSections( self, sectionPath, listOrdered = False ):
result = self.__getCFG( sectionPath )
if not result[ 'OK' ]:
return result
sectionCFG = result[ 'Value' ]
return S_OK( sectionCFG.listSections( listOrdered ) )
def getOptions( self, sectionPath, listOrdered = False ):
result = self.__getCFG( sectionPath )
if not result[ 'OK' ]:
return result
sectionCFG = result[ 'Value' ]
return S_OK( sectionCFG.listOptions( listOrdered ) )
def getOptionsDict( self, sectionPath ):
result = self.__getCFG( sectionPath )
if not result[ 'OK' ]:
return result
sectionCFG = result[ 'Value' ]
data = {}
for opName in sectionCFG.listOptions():
data[ opName ] = sectionCFG[ opName ]
return S_OK( data )
def getPath( self, option, vo = False, setup = False ):
"""
Generate the CS path for an option:
- if vo is not defined, the helper's vo will be used for multi VO installations
- if setup evaluates False (except None) -> The helpers setup will be used
- if setup is defined -> whatever is defined will be used as setup
- if setup is None -> Defaults will be used
:param option: path with respect to the Operations standard path
:type option: string
"""
for path in self.__getSearchPaths():
optionPath = os.path.join( path, option )
value = gConfig.getValue( optionPath , 'NoValue' )
if value != "NoValue":
return optionPath
return ''
|
vmendez/DIRAC
|
ConfigurationSystem/Client/Helpers/Operations.py
|
Python
|
gpl-3.0
| 6,272
|
[
"DIRAC"
] |
f6ea30143ede85534e59148171eb8c1821decb31173b524de2e3d59e5fdc9347
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
"""
Gaussian Naive Bayes classification.
This checks that GaussianNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""Test Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
"""Test input checks for the fit method"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
"""Test whether discrete NB classes use provided prior
when using partial_fit"""
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
"""coef_ and intercept_ should have shapes as in other linear models.
Non-regression test for issue #2127.
"""
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
"""Test for issue #4268.
Tests that the feature log prob value computed by BernoulliNB when
alpha=1.0 is equal to the expression given in Manning, Raghavan,
and Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
"""
Tests that BernoulliNB when alpha=1.0 gives the same values as
those given for the toy example in Manning, Raghavan, and
Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
|
mehdidc/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 16,330
|
[
"Gaussian"
] |
9d9c7e394b38a5852b17cdeb8672a272765da120447ecdbbe6eb7c30aba06514
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/vtk_volume_v2'
# -----------------------------------------------------------------------------
from vtk import *
from tonic.vtk.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWise(pwf, dataRange, center, halfSpread):
# Use the piecewise function passed in rather than the module-level global.
pwf.RemoveAllPoints()
if (center - halfSpread) <= dataRange[0]:
pwf.AddPoint(dataRange[0], 0.0)
pwf.AddPoint(center, 1.0)
else:
pwf.AddPoint(dataRange[0], 0.0)
pwf.AddPoint(center - halfSpread, 0.0)
pwf.AddPoint(center, 1.0)
if (center + halfSpread) >= dataRange[1]:
pwf.AddPoint(dataRange[1], 0.0)
else:
pwf.AddPoint(center + halfSpread, 0.0)
pwf.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
imageWriter = vtkPNGWriter()
def writeDepthMap(imageData, path):
width = imageData.GetDimensions()[0]
height = imageData.GetDimensions()[1]
nbTuples = width * height
inputArray = imageData.GetPointData().GetArray(0)
array = bytearray(nbTuples)
for idx in range(inputArray.GetNumberOfTuples()):
array[idx] = 255 - int(inputArray.GetValue(idx))
with open(path, 'wb') as f:
f.write(array)
def writeColorMap(imageData, path):
imageWriter.SetInputData(imageData)
imageWriter.SetFileName(path)
imageWriter.Write()
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
source = vtkRTAnalyticSource()
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(source.GetOutputPort())
mapper.RenderToImageOn()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(37.35310363769531, 0.231373, 0.298039, 0.752941)
colorFunction.AddRGBPoint(157.0909652709961, 0.865003, 0.865003, 0.865003)
colorFunction.AddRGBPoint(276.8288269042969, 0.705882, 0.0156863, 0.14902)
dataRange = [37.3, 276.8]
nbSteps = 10
halfSpread = (dataRange[1] - dataRange[0]) / float(2*nbSteps)
centers = [ dataRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
# volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(500, 500)
renderer = vtkRenderer()
renderer.SetBackground(0.5, 0.5, 0.6)
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
colorMap = vtkImageData()
depthMap = vtkImageData()
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
dsb = ImageDataSetBuilder(dataset_destination_path, 'image/png', {'type': 'spherical', 'phi': range(0, 360, 30), 'theta': range(-60, 61, 30)})
# Add PieceWise navigation
dsb.getDataHandler().registerArgument(priority=1, name='pwf', label='Transfer function', values=centers, ui='slider')
# Add Depth data
dsb.getDataHandler().registerData(name='depth', type='array', fileName='_depth.uint8', metadata={ 'dimensions': window.GetSize() })
# Loop over data and generate images
dsb.start(window, renderer)
for center in dsb.getDataHandler().pwf:
updatePieceWise(scalarOpacity, dataRange, center, halfSpread)
for camera in dsb.getCamera():
dsb.updateCamera(camera)
mapper.GetColorImage(colorMap)
writeColorMap(colorMap, dsb.getDataHandler().getDataAbsoluteFilePath('image'))
mapper.GetDepthImage(depthMap)
writeDepthMap(depthMap, dsb.getDataHandler().getDataAbsoluteFilePath('depth'))
dsb.stop()
|
Kitware/tonic-data-generator
|
scripts/vtk/samples/syntax-evolution-volume_v2.py
|
Python
|
bsd-3-clause
| 4,351
|
[
"VTK"
] |
6e155dc389015478b1c3de2abfd650caf01e3743c7e137dd92c64d2786a6646d
|
# -*- coding: utf-8 -*-
""" R specifics checks """
# Let's disable the complain about how short R is
# pylint: disable=C0103
import re
import os
import urllib
import rpm
from FedoraReview import CheckBase, RegistryBase
class Registry(RegistryBase):
''' Register all check in this file in group 'R' '''
group = 'R'
def is_applicable(self):
""" Check is the tests are applicable, here it checks whether
it is a R package (spec starts with 'R-') or not.
"""
if self.is_user_enabled():
return self.user_enabled_value()
return self.checks.spec.name.startswith("R-")
class RCheckBase(CheckBase):
""" Base class for all R specific checks. """
DIR = ['%{packname}']
DOCS = ['doc', 'DESCRIPTION', 'NEWS', 'CITATION']
URLS = [
'http://www.bioconductor.org/packages/release/data/'
'experiment/src/contrib/PACKAGES',
'http://www.bioconductor.org/packages/release/data/'
'annotation/src/contrib/PACKAGES',
'http://www.bioconductor.org/packages/release/bioc/'
'src/contrib/PACKAGES',
'http://cran.at.r-project.org/src/contrib/PACKAGES',
'http://r-forge.r-project.org/src/contrib/PACKAGES',
]
def __init__(self, base):
CheckBase.__init__(self, base, __file__)
def get_upstream_r_package_version(self):
""" Browse the PACKAGE file of the different repo to find the
latest version number of the given package name.
"""
name = self.spec.name[2:]
versionok = []
version = None
for url in self.URLS:
try:
stream = urllib.urlopen(url)
content = stream.read()
stream.close()
except IOError, err:
self.log.warning('Could not retrieve info from ' + url)
self.log.debug('Error: %s' % err, exc_info=True)
continue
res = re.search('Package: %s\nVersion:.*' % name, content)
if res is not None:
self.log.debug("Found in: %s" % url)
versionok.append(url)
if version is None:
ver = res.group().split('\n')[1]
version = ver.replace('Version:', '').strip()
else:
self.log.warning(
" * Found two version of the package in %s"
% (" ".join(versionok)))
return version
class RCheckBuildRequires(RCheckBase):
""" Check if the BuildRequires have the mandatory elements. """
def __init__(self, base):
""" Instanciate check variable """
RCheckBase.__init__(self, base)
self.url = 'http://fedoraproject.org/wiki/Packaging:R'
self.text = 'Package contains the mandatory BuildRequires.'
self.automatic = True
def run_on_applicable(self):
""" Run the check """
brs = self.spec.build_requires
tocheck = ['R-devel', 'tex(latex)']
if set(tocheck).intersection(set(brs)):
self.set_passed(self.PASS)
else:
self.set_passed(self.FAIL,
'Missing BuildRequires on %s'
% ', '.join(set(tocheck).difference(set(brs))))
class RCheckRequires(RCheckBase):
""" Check if the Requires have R-core. """
def __init__(self, base):
""" Instanciate check variable """
RCheckBase.__init__(self, base)
self.url = 'http://fedoraproject.org/wiki/Packaging:R'
self.text = 'Package requires R-core.'
self.automatic = True
def run_on_applicable(self):
""" Run the check """
brs = self.spec.get_requires()
if 'R' in brs and 'R-core' not in brs:
self.set_passed(self.FAIL,
"Package should requires R-core rather than R")
else:
self.set_passed('R-core' in brs)
class RCheckDoc(RCheckBase):
""" Check if the package has the usual %doc. """
def __init__(self, base):
""" Instanciate check variable """
RCheckBase.__init__(self, base)
self.url = 'http://fedoraproject.org/wiki/Packaging:R'
self.automatic = True
self.text = 'Package has the default elements marked as %%doc: '
def run_on_applicable(self):
""" Run the check """
doc_found = []
for doc in self.DOCS:
if self.checks.rpms.find("*" + doc):
doc_found.append(doc)
docs = self.spec.find_all_re("%doc.*")
self.text += ", ".join(doc_found)
for entry in docs:
entry = os.path.basename(entry).strip()
if str(entry) in doc_found:
doc_found.remove(entry)
self.set_passed(doc_found == [])
class RCheckLatestVersionIsPackaged(RCheckBase):
""" Check if the last version of the R package is the one proposed """
deprecates = ['CheckLatestVersionIsPackaged']
def __init__(self, base):
""" Instanciate check variable """
RCheckBase.__init__(self, base)
self.url = 'https://fedoraproject.org/wiki/Packaging:Guidelines'
self.text = 'Latest version is packaged.'
self.automatic = True
self.type = 'SHOULD'
def run_on_applicable(self):
""" Run the check """
cur_version = self.spec.expand_tag('Version')
up_version = self.get_upstream_r_package_version()
if up_version is None:
self.set_passed(
self.PENDING,
'The package does not come from one of the standard sources')
return
up_version = up_version.replace('-', '.')
self.set_passed(
up_version == cur_version,
"Latest upstream version is %s, packaged version is %s"
% (up_version, cur_version))
class RCheckCheckMacro(RCheckBase):
""" Check if the section %check is present in the spec """
def __init__(self, base):
""" Instantiate check variable """
RCheckBase.__init__(self, base)
self.url = 'https://fedoraproject.org/wiki/Packaging:Guidelines'
self.text = 'The %check macro is present'
self.automatic = True
self.type = 'SHOULD'
def run_on_applicable(self):
""" Run the check """
sec_check = self.spec.get_section('%check')
self.set_passed(bool(sec_check))
class RCheckInstallSection(RCheckBase):
""" Check if the build section follows the expected behavior """
def __init__(self, base):
""" Instanciate check variable """
RCheckBase.__init__(self, base)
self.url = 'http://fedoraproject.org/wiki/Packaging:R'
self.text = 'The package has the standard %install section.'
self.automatic = True
self.type = 'MUST'
def run_on_applicable(self):
""" Run the check """
b_dir = False
b_test = False
b_rm = False
b_install = False
section = self.spec.get_section('%install')
if not section:
self.set_passed(self.FAIL)
return
for line in section:
if 'mkdir -p' in line and \
('/R/library' in line or 'rlibdir' in line):
b_dir = True
if rpm.expandMacro("test -d %{packname}/src && "
"(cd %{packname}/src; rm -f *.o *.so)") in line:
b_test = True
if 'rm' in line and 'R.css' in line:
b_rm = True
if 'R CMD INSTALL' in line \
and '-l ' in line \
and rpm.expandMacro('%{packname}') in line \
and ('/R/library' in line or 'rlibdir' in line):
b_install = True
if b_dir and b_test and b_rm and b_install:
self.set_passed(self.PASS)
else:
cmt = ''
if not b_dir:
cmt += "Package doesn't have the standard " \
"directory creation.\n"
if not b_test:
cmt += "Package doesn't have the standard " \
"removal of *.o and *.so.\n"
if not b_rm:
cmt += "Package doesn't have the standard " \
"removal of the R.css file\n"
if not b_install:
cmt += "Package doesn't have the standard " \
"R CMD INSTALL function\n"
self.set_passed(self.FAIL, cmt)
# vim: set expandtab ts=4 sw=4:
|
timlau/FedoraReview
|
plugins/R.py
|
Python
|
gpl-2.0
| 8,530
|
[
"Bioconductor"
] |
d64e8ccb8275514c8f0ebb9ad14a21f5af1b23080c4020ad943123d4e79c5f97
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, getdate, cint, cstr
from frappe import throw, _
from erpnext.utilities.transaction_base import TransactionBase, delete_events
from erpnext.stock.utils import get_valid_serial_nos
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
class MaintenanceSchedule(TransactionBase):
def generate_schedule(self):
self.set('schedules', [])
frappe.db.sql("""delete from `tabMaintenance Schedule Detail`
where parent=%s""", (self.name))
count = 1
for d in self.get('items'):
self.validate_maintenance_detail()
s_list = []
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits, d.sales_person)
for i in range(d.no_of_visits):
child = self.append('schedules')
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count + 1
child.sales_person = d.sales_person
self.save()
def on_submit(self):
if not self.get('schedules'):
throw(_("Please click on 'Generate Schedule' to get schedule"))
self.check_serial_no_added()
self.validate_schedule()
email_map = {}
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
no_email_sp = []
if d.sales_person not in email_map:
sp = frappe.get_doc("Sales Person", d.sales_person)
try:
email_map[d.sales_person] = sp.get_email_id()
except frappe.ValidationError:
no_email_sp.append(d.sales_person)
if no_email_sp:
frappe.msgprint(
frappe._("Setting Events to {0}, since the Employee attached to the below Sales Persons does not have a User ID{1}").format(
self.owner, "<br>" + "<br>".join(no_email_sp)
))
scheduled_date = frappe.db.sql("""select scheduled_date from
`tabMaintenance Schedule Detail` where sales_person=%s and item_code=%s and
parent=%s""", (d.sales_person, d.item_code, self.name), as_dict=1)
for key in scheduled_date:
description =frappe._("Reference: {0}, Item Code: {1} and Customer: {2}").format(self.name, d.item_code, self.customer)
frappe.get_doc({
"doctype": "Event",
"owner": email_map.get(d.sales_person, self.owner),
"subject": description,
"description": description,
"starts_on": cstr(key["scheduled_date"]) + " 10:00:00",
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
}).insert(ignore_permissions=1)
frappe.db.set(self, 'status', 'Submitted')
def create_schedule_list(self, start_date, end_date, no_of_visit, sales_person):
schedule_list = []
start_date_copy = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff / no_of_visit
for visit in range(cint(no_of_visit)):
if (getdate(start_date_copy) < getdate(end_date)):
start_date_copy = add_days(start_date_copy, add_by)
if len(schedule_list) < no_of_visit:
schedule_date = self.validate_schedule_date_for_holiday_list(getdate(start_date_copy),
sales_person)
if schedule_date > getdate(end_date):
schedule_date = getdate(end_date)
schedule_list.append(schedule_date)
return schedule_list
def validate_schedule_date_for_holiday_list(self, schedule_date, sales_person):
validated = False
employee = frappe.db.get_value("Sales Person", sales_person, "employee")
if employee:
holiday_list = get_holiday_list_for_employee(employee)
else:
holiday_list = frappe.get_cached_value('Company', self.company, "default_holiday_list")
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday` where parent=%s''', holiday_list)
if not validated and holidays:
# max iterations = len(holidays)
for i in range(len(holidays)):
if schedule_date in holidays:
schedule_date = add_days(schedule_date, -1)
else:
validated = True
break
return schedule_date
def validate_dates_with_periodicity(self):
for d in self.get("items"):
if d.start_date and d.end_date and d.periodicity and d.periodicity!="Random":
date_diff = (getdate(d.end_date) - getdate(d.start_date)).days + 1
days_in_period = {
"Weekly": 7,
"Monthly": 30,
"Quarterly": 90,
"Half Yearly": 180,
"Yearly": 365
}
if date_diff < days_in_period[d.periodicity]:
throw(_("Row {0}: To set {1} periodicity, difference between from and to date \
must be greater than or equal to {2}")
.format(d.idx, d.periodicity, days_in_period[d.periodicity]))
def validate_maintenance_detail(self):
if not self.get('items'):
throw(_("Please enter Maintaince Details first"))
for d in self.get('items'):
if not d.item_code:
throw(_("Please select item code"))
elif not d.start_date or not d.end_date:
throw(_("Please select Start Date and End Date for Item {0}".format(d.item_code)))
elif not d.no_of_visits:
throw(_("Please mention no of visits required"))
elif not d.sales_person:
throw(_("Please select a Sales Person for item: {0}".format(d.item_name)))
if getdate(d.start_date) >= getdate(d.end_date):
throw(_("Start date should be less than end date for Item {0}").format(d.item_code))
def validate_sales_order(self):
for d in self.get('items'):
if d.sales_order:
chk = frappe.db.sql("""select ms.name from `tabMaintenance Schedule` ms,
`tabMaintenance Schedule Item` msi where msi.parent=ms.name and
msi.sales_order=%s and ms.docstatus=1""", d.sales_order)
if chk:
throw(_("Maintenance Schedule {0} exists against {1}").format(chk[0][0], d.sales_order))
def validate(self):
self.validate_maintenance_detail()
self.validate_dates_with_periodicity()
self.validate_sales_order()
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_doc = frappe.get_doc("Serial No", serial_no)
serial_no_doc.amc_expiry_date = amc_expiry_date
serial_no_doc.save()
def validate_serial_no(self, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = frappe.db.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "warehouse", "delivery_date"], as_dict=1)
if not sr_details:
frappe.throw(_("Serial No {0} not found").format(serial_no))
if sr_details.warranty_expiry_date \
and getdate(sr_details.warranty_expiry_date) >= getdate(amc_start_date):
throw(_("Serial No {0} is under warranty upto {1}")
.format(serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and getdate(sr_details.amc_expiry_date) >= getdate(amc_start_date):
throw(_("Serial No {0} is under maintenance contract upto {1}")
.format(serial_no, sr_details.amc_expiry_date))
if not sr_details.warehouse and sr_details.delivery_date and \
getdate(sr_details.delivery_date) >= getdate(amc_start_date):
throw(_("Maintenance start date can not be before delivery date for Serial No {0}")
.format(serial_no))
def validate_schedule(self):
item_lst1 =[]
item_lst2 =[]
for d in self.get('items'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in self.get('schedules'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
throw(_("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'"))
else:
for x in item_lst1:
if x not in item_lst2:
throw(_("Please click on 'Generate Schedule'"))
def check_serial_no_added(self):
serial_present =[]
for d in self.get('items'):
if d.serial_no:
serial_present.append(d.item_code)
for m in self.get('schedules'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
throw(_("Please click on 'Generate Schedule' to fetch Serial No added for Item {0}").format(m.item_code))
def on_cancel(self):
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
frappe.db.set(self, 'status', 'Cancelled')
delete_events(self.doctype, self.name)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doc("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"sales_person": "service_person"
}
}
}, target_doc)
return doclist
|
neilLasrado/erpnext
|
erpnext/maintenance/doctype/maintenance_schedule/maintenance_schedule.py
|
Python
|
gpl-3.0
| 9,352
|
[
"VisIt"
] |
2c47640d9d5c5e3b68a344859c2212c2a1f885095390d72951f2693a0c4409cc
|
"""
Tests for `flask_openapi.utils`.
"""
import pytest
from flask_openapi import utils
@pytest.mark.parametrize('data,key,value,expected', [
({}, 'foo', None, {}),
({}, 'foo', {}, {}),
({}, 'foo', 'bar', {'foo': 'bar'})
])
def test_add_optional(data, key, value, expected):
"""
Test if a value is added to the dict if it is not None.
"""
utils.add_optional(data, key, value)
assert data == expected
@pytest.mark.parametrize('url,path,parameters', [
('/items/<id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'string'
}]),
('/items/<string:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'string'
}]),
('/items/<int:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'integer'
}]),
('/items/<float:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'number'
}]),
('/items/<path:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'string'
}]),
('/items/<any:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'string'
}]),
('/items/<uuid:id>', '/items/{id}', [{
'name': 'id',
'in': 'path',
'required': True,
'type': 'string'
}])
])
def test_parse_werkzeug_url(url, path, parameters):
"""
Test if parse_werkzeug_url processes the url and converts types.
"""
result_path, result_parameters = utils.parse_werkzeug_url(url)
assert result_path == path
assert result_parameters == parameters
@pytest.mark.parametrize('input,expected', [
('Oak <oak@pallettown.kanto> (http://pallettown.kanto/oak)', {
'name': 'Oak',
'email': 'oak@pallettown.kanto',
'url': 'http://pallettown.kanto/oak'
}),
('Elm <elm@newbarktown.johto>', {
'name': 'Elm',
'email': 'elm@newbarktown.johto'
}),
('Birch (http://littleroottown.hoenn/birch)', {
'name': 'Birch',
'url': 'http://littleroottown.hoenn/birch'
}),
('<>', {}),
('', {})
])
def test_parse_contact_string(input, expected):
"""
Test if contact information is properly extracted.
"""
result = utils.parse_contact_string(input)
assert result == expected
def test_ref():
"""
Test if args are converted to a JSON reference.
"""
result = utils.ref('galaxy', 'milkyWay', 'sun', 'earth')
assert result == {
'$ref': '#/galaxy/milkyWay/sun/earth'
}
|
remcohaszing/flask-openapi
|
flask_openapi/utils_test.py
|
Python
|
mit
| 2,724
|
[
"Galaxy"
] |
be1d38b5b59a43478d64e2ceb31ef3f1a40d73f74141c4293536cf77fb71eb10
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# This file is part of pyAlienFX.
#
# pyAlienFX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyAlienFX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyAlienFX. If not, see <http://www.gnu.org/licenses/>.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter
# to Creative Commons, 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.
#
import os
import sys
import platform
dist = platform.dist()[0]
if os.getuid() != 0:
print "You must launch the installer script as root !"
sys.exit(1)
BasePath = os.path.realpath('.')
thanksmsg = "Thanks !\nDevelopped by \033[1;30mXqua\033[0m"
print """
\033[1;32mWelcome to the \033[0m\033[1;31mpyAlienFX\033[0m\033[1;32m Installer script !\033[0m
You are about to configure the software : pyAlienFX !"""
if len(sys.argv) > 1:
if sys.argv[1] != "deb":
n = 0
while True:
q = raw_input("Do you want to continue \033[1;31m(Y/N)\033[0m ? ")
if q.lower() == "n":
print thanksmsg
sys.exit(0)
elif q.lower() == "y":
break
elif n == 3:
print thanksmsg
sys.exit(0)
else:
print "Please enter Y or N !"
n += 1
else:
BasePath = "/usr/share/pyAlienFX"
if not os.path.isdir(BasePath):
os.mkdir(BasePath)
print """
\033[1;31m !!! WARNING !!!\033[0m
The current version is packaged with a daemon running in the background as a TCP/IP server to control the lights.
First, this might/will cause trouble under Windows systems.
Second, this functionality is still in the Alpha stage and might cause unexpected bugs.
It is recommended that you do not start the daemon automatically, as you can still test it by launching the pyAlienFX_daemon.py script and then restarting the other pyAlienFX scripts."""
while True:
q = raw_input('Do you wish to launch the daemon at startup ? \033[1;31m(Y/N)\033[0m ')
if q.lower() == "y":
optdeamon = ""
break
elif q.lower() == "n":
optdeamon = "#"
break
elif q.lower() == "":
optdeamon = "#"
break
else:
print "Please answer Y or N (N)"
Bin = """#!/bin/sh
# -*- coding: UTF-8 -*-
#This file is part of pyAlienFX.
#
# pyAlienFX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyAlienFX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyAlienFX. If not, see <http://www.gnu.org/licenses/>.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter
# to Creative Commons, 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.
#
#THIS FILE IS GENERATED WITH THE install.py file ! Do not modify it !
cd %s
gksudo ./pyAlienFX_Launcher.sh
""" % (BasePath)
Launcher = """#!/bin/sh
# -*- coding: UTF-8 -*-
#This file is part of pyAlienFX.
#
# pyAlienFX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyAlienFX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyAlienFX. If not, see <http://www.gnu.org/licenses/>.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter
# to Creative Commons, 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.
#
# This file will launch the daemon and the indicator applet !
# You should add it to your Session Auto Launch for a better experience !
#THIS FILE IS GENERATED WITH THE install.py file ! Do not modify it !
cd %s
%spython ./pyAlienFX_daemon.py &
%ssleep 5
python ./pyAlienFX_Indicator.py &
""" % (BasePath, optdeamon, optdeamon)
Unity = """[Desktop Entry]
Name=pyAlienFX
Comment=Launch the pyAlienFX Configurator
TryExec=pyAlienFX
Exec=pyAlienFX
Icon=%s/images/icon.png
Type=Application
Categories=Utility;
StartupNotify=true
OnlyShowIn=GNOME;Unity;
""" % (BasePath)
try:
f = open('/usr/share/applications/pyAlienFX.desktop', 'w')
f.write(Unity)
f.close()
if dist == "Ubuntu":
f = open('/etc/xdg/autostart/pyAlienFX.desktop', 'w')
f.write(Unity)
f.close()
except:
print "\033[1;31m !!! Please run the script as sudo in order to install the script in the Unity interface !!! \033[0m"
# os.setuid(1000)
# os.setgid(1001)
f = open('%s/pyAlienFX_Launcher.sh' % BasePath, 'w')
f.write(Launcher)
f.close()
os.system('chmod 755 %s/pyAlienFX_Launcher.sh' % BasePath)
try:
f = open('/usr/bin/pyAlienFX', 'w')
f.write(Bin)
f.close()
os.system('chmod 755 /usr/bin/pyAlienFX')
except:
f = open('%s/pyAlienFX' % BasePath, 'w')
f.write(Bin)
f.close()
os.system('chmod 755 %s/pyAlienFX' % BasePath)
print "\033[1;31m !!! Please run the script as sudo in order to install the script correctly !!! \033[0m"
print "Thanks for installing !\n%s" % thanksmsg
|
jrobeson/pyalienfx
|
install.py
|
Python
|
gpl-3.0
| 6,691
|
[
"VisIt"
] |
555dfac393013a15bc5de530c8f331113d4eac8cbc8d5606a59af2f753b54a4d
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.random_org as random_org
from salt.ext.tornado.httpclient import HTTPClient
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
def check_status():
"""
Check the status of random.org
"""
try:
return HTTPClient().fetch("https://api.random.org/").code == 200
except Exception: # pylint: disable=broad-except
return False
@skipIf(True, "WAR ROOM 7/31/2019, test needs to allow for quotas of random website")
class RandomOrgTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.random_org
"""
def setup_loader_modules(self):
return {random_org: {}}
def setUp(self):
if check_status() is False:
self.skipTest("External resource 'https://api.random.org/' not available")
# 'getUsage' function tests: 1
def test_getusage(self):
"""
Test if it shows current usage statistics.
"""
ret = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.getUsage(), ret)
self.assertDictEqual(
random_org.getUsage(api_key="peW", api_version="1"),
{
"bitsLeft": None,
"requestsLeft": None,
"res": True,
"totalBits": None,
"totalRequests": None,
},
)
# 'generateIntegers' function tests: 1
def test_generateintegers(self):
"""
Test if it generates random integers.
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateIntegers(), ret1)
ret2 = {"message": "Rquired argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateIntegers(api_key="peW", api_version="1"), ret2
)
ret3 = {
"message": "Number of integers must be between 1 and 10000",
"res": False,
}
self.assertDictEqual(
random_org.generateIntegers(
api_key="peW", api_version="1", number="5", minimum="1", maximum="6"
),
ret3,
)
ret4 = {
"message": (
"Minimum argument must be between -1,000,000,000 and 1,000,000,000"
),
"res": False,
}
self.assertDictEqual(
random_org.generateIntegers(
api_key="peW", api_version="1", number=5, minimum="1", maximum="6"
),
ret4,
)
ret5 = {
"message": (
"Maximum argument must be between -1,000,000,000 and 1,000,000,000"
),
"res": False,
}
self.assertDictEqual(
random_org.generateIntegers(
api_key="peW", api_version="1", number=5, minimum=1, maximum="6"
),
ret5,
)
ret6 = {"message": "Base must be either 2, 8, 10 or 16.", "res": False}
self.assertDictEqual(
random_org.generateIntegers(
api_key="peW", api_version="1", number=5, minimum=1, maximum=6, base="2"
),
ret6,
)
ret7 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateIntegers(
api_key="peW", api_version="1", number=5, minimum=1, maximum=6, base=2
),
ret7,
)
# 'generateStrings' function tests: 1
def test_generatestrings(self):
"""
Test if it generates random strings.
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateStrings(), ret1)
ret2 = {"message": "Required argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateStrings(api_key="peW", api_version="1"), ret2
)
ret3 = {
"message": "Number of strings must be between 1 and 10000",
"res": False,
}
char = "abcdefghijklmnopqrstuvwxyz"
self.assertDictEqual(
random_org.generateStrings(
api_key="peW", api_version="1", number="5", length="8", characters=char
),
ret3,
)
ret3 = {"message": "Length of strings must be between 1 and 20", "res": False}
self.assertDictEqual(
random_org.generateStrings(
api_key="peW", api_version="1", number=5, length="8", characters=char
),
ret3,
)
ret3 = {"message": "Length of characters must be less than 80.", "res": False}
self.assertDictEqual(
random_org.generateStrings(
api_key="peW", api_version="1", number=5, length=8, characters=char * 4
),
ret3,
)
ret3 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateStrings(
api_key="peW", api_version="1", number=5, length=8, characters=char
),
ret3,
)
# 'generateUUIDs' function tests: 1
def test_generateuuids(self):
"""
Test if it generates a list of random UUIDs.
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateUUIDs(), ret1)
ret2 = {"message": "Required argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateUUIDs(api_key="peW", api_version="1"), ret2
)
ret3 = {"message": "Number of UUIDs must be between 1 and 1000", "res": False}
self.assertDictEqual(
random_org.generateUUIDs(api_key="peW", api_version="1", number="5"), ret3
)
ret3 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateUUIDs(api_key="peW", api_version="1", number=5), ret3
)
# 'generateDecimalFractions' function tests: 1
@pytest.mark.flaky(max_runs=4)
def test_generatedecimalfractions(self):
"""
Test if it generates true random decimal fractions.
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateDecimalFractions(), ret1)
ret2 = {"message": "Required argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateDecimalFractions(api_key="peW", api_version="1"), ret2
)
ret3 = {
"message": "Number of decimal fractions must be between 1 and 10000",
"res": False,
}
self.assertDictEqual(
random_org.generateDecimalFractions(
api_key="peW",
api_version="1",
number="5",
decimalPlaces="4",
replacement=True,
),
ret3,
)
ret4 = {
"message": "Number of decimal places must be between 1 and 20",
"res": False,
}
self.assertDictEqual(
random_org.generateDecimalFractions(
api_key="peW",
api_version="1",
number=5,
decimalPlaces="4",
replacement=True,
),
ret4,
)
ret5 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateDecimalFractions(
api_key="peW",
api_version="1",
number=5,
decimalPlaces=4,
replacement=True,
),
ret5,
)
# 'generateGaussians' function tests: 1
@pytest.mark.flaky(max_runs=4)
def test_generategaussians(self):
"""
Test if it generates true random numbers from a
Gaussian distribution (also known as a normal distribution).
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateGaussians(), ret1)
ret2 = {"message": "Required argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateGaussians(api_key="peW", api_version="1"), ret2
)
ret3 = {
"message": "Number of decimal fractions must be between 1 and 10000",
"res": False,
}
self.assertDictEqual(
random_org.generateGaussians(
api_key="peW",
api_version="1",
number="5",
mean="0.0",
standardDeviation="1.0",
significantDigits="8",
),
ret3,
)
ret4 = {
"message": "The distribution's mean must be between -1000000 and 1000000",
"res": False,
}
self.assertDictEqual(
random_org.generateGaussians(
api_key="peW",
api_version="1",
number=5,
mean="0.0",
standardDeviation="1.0",
significantDigits="8",
),
ret4,
)
ret5 = {
"message": (
"The distribution's standard deviation must be"
" between -1000000 and 1000000"
),
"res": False,
}
self.assertDictEqual(
random_org.generateGaussians(
api_key="peW",
api_version="1",
number=5,
mean=0.0,
standardDeviation="1.0",
significantDigits="8",
),
ret5,
)
ret6 = {
"message": "The number of significant digits must be between 2 and 20",
"res": False,
}
self.assertDictEqual(
random_org.generateGaussians(
api_key="peW",
api_version="1",
number=5,
mean=0.0,
standardDeviation=1.0,
significantDigits="8",
),
ret6,
)
ret7 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateGaussians(
api_key="peW",
api_version="1",
number=5,
mean=0.0,
standardDeviation=1.0,
significantDigits=8,
),
ret7,
)
# 'generateBlobs' function tests: 1
def test_generateblobs(self):
"""
Test if it list all Slack users.
"""
ret1 = {"message": "No Random.org api key or api version found.", "res": False}
self.assertDictEqual(random_org.generateBlobs(), ret1)
ret2 = {"message": "Required argument, number is missing.", "res": False}
self.assertDictEqual(
random_org.generateBlobs(api_key="peW", api_version="1"), ret2
)
ret3 = {"message": "Number of blobs must be between 1 and 100", "res": False}
self.assertDictEqual(
random_org.generateBlobs(
api_key="peW", api_version="1", number="5", size="1"
),
ret3,
)
ret4 = {"message": "Number of blobs must be between 1 and 100", "res": False}
self.assertDictEqual(
random_org.generateBlobs(api_key="peW", api_version="1", number=5, size=1),
ret4,
)
ret5 = {"message": "Format must be either base64 or hex.", "res": False}
self.assertDictEqual(
random_org.generateBlobs(
api_key="peW", api_version="1", number=5, size=8, format="oct"
),
ret5,
)
ret6 = {"message": "Parameter 'apiKey' is malformed", "res": False}
self.assertDictEqual(
random_org.generateBlobs(
api_key="peW", api_version="1", number=5, size=8, format="hex"
),
ret6,
)
|
saltstack/salt
|
tests/unit/modules/test_random_org.py
|
Python
|
apache-2.0
| 12,483
|
[
"Gaussian"
] |
10d38055fc3d90de780a9016398464d687205cd094ce004685dd252565fc24f3
|
#!/usr/bin/env python
"""A simple example demonstrating TVTK. This example is basically a
translation of the VTK tutorial demo available in
VTK/Examples/Tutorial/Step6/Python/Cone6.py.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2004-2006, Enthought, Inc.
# License: BSD Style.
from tvtk.api import tvtk
from tvtk.common import configure_input_data
# Create a cone source and configure it.
cs = tvtk.ConeSource(height=3.0, radius=1.0, resolution=36)
# Print the traits of the cone.
#cs.print_traits()
# Setup the rest of the pipeline.
m = tvtk.PolyDataMapper()
# Note that VTK's GetOutput method is special because it has two call
# signatures: GetOutput() and GetOutput(int N) (which gets the N'th
# output). In tvtk it is represented as both a property and as a
# method. Using the output property will work fine if all you want is
# the default output. OTOH if you want the N'th output use
# get_output(N).
# m.input = cs.output # or m.input = cs.get_output()
configure_input_data(m, cs.output)
cs.update()
# Create the actor and set its mapper.
a = tvtk.Actor(mapper=m)
# Create a Renderer, add the actor and set its background color.
ren = tvtk.Renderer(background=(0.1, 0.2, 0.4))
ren.add_actor(a)
# Create a RenderWindow, add the renderer and set its size.
rw = tvtk.RenderWindow(size=(300,300))
rw.add_renderer(ren)
# Create the RenderWindowInteractor
rwi = tvtk.RenderWindowInteractor(render_window=rw)
# Setup a box widget.
bw = tvtk.BoxWidget(interactor=rwi, place_factor=1.25,
prop3d=a)
bw.place_widget()
def callback(widget, event):
"""This callback sets the transformation of the cone using that
setup by the the box."""
t = tvtk.Transform()
bw.get_transform(t)
bw.prop3d.user_transform = t
# Add an observer
bw.add_observer("InteractionEvent", callback)
# Turn on the box interaction. The default is off and can be toggled
# by pressing 'i' on the RenderWindowInteractor.
bw.on()
# Start the VTK event loop.
rwi.initialize()
rwi.start()
|
dmsurti/mayavi
|
examples/tvtk/simple.py
|
Python
|
bsd-3-clause
| 2,042
|
[
"VTK"
] |
014d7204ff683037858739a2c1b5e23e1a2d959ab6921292996a629f25253d07
|
'''
=============================
EM for Linear-Gaussian Models
=============================
This example shows how one may use the EM algorithm to estimate model
parameters with a Kalman Filter.
The EM algorithm is a meta-algorithm for learning parameters in probabilistic
models. The algorithm works by first fixing the parameters and finding a closed
form distribution over the unobserved variables, then finds new parameters that
maximize the expected likelihood of the observed variables (where the
expectation is taken over the unobserved ones). Due to convexity arguments, we
are guaranteed that each iteration of the algorithm will increase the
likelihood of the observed data and that it will eventually reach a local
optimum.
The EM algorithm is applied to the Linear-Gaussian system (that is, the model
assumed by the Kalman Filter) by first using the Kalman Smoother to calculate
the distribution over all unobserved variables (in this case, the hidden target
states), then closed-form update equations are used to update the model
parameters.
The first figure plotted contains 4 sets of lines. The first, labeled `true`,
represents the true, unobserved state of the system. The second, labeled
`blind`, represents the predicted state of the system if no measurements are
incorporated. The third, labeled `filtered`, are the state estimates given
measurements up to and including the current time step. Finally, the fourth,
labeled `smoothed`, are the state estimates using all observations for all time
steps. The latter three estimates use parameters learned via 10 iterations of
the EM algorithm.
The second figure contains a single line representing the likelihood of the
observed data as a function of the EM Algorithm iteration.
'''
import numpy as np
import pylab as pl
from pykalman.datasets import load_robot
from pykalman import KalmanFilter
# Load data and initialize Kalman Filter
data = load_robot()
kf = KalmanFilter(
data.transition_matrix,
data.observation_matrix,
data.initial_transition_covariance,
data.initial_observation_covariance,
data.transition_offsets,
data.observation_offset,
data.initial_state_mean,
data.initial_state_covariance,
em_vars=[
'transition_matrices', 'observation_matrices',
'transition_covariance', 'observation_covariance',
'observation_offsets', 'initial_state_mean',
'initial_state_covariance'
]
)
# Learn good values for parameters named in `em_vars` using the EM algorithm
loglikelihoods = np.zeros(10)
for i in range(len(loglikelihoods)):
kf = kf.em(X=data.observations, n_iter=1)
loglikelihoods[i] = kf.loglikelihood(data.observations)
# Estimate the state without using any observations. This will let us see how
# good we could do if we ran blind.
n_dim_state = data.transition_matrix.shape[0]
n_timesteps = data.observations.shape[0]
blind_state_estimates = np.zeros((n_timesteps, n_dim_state))
for t in range(n_timesteps - 1):
if t == 0:
blind_state_estimates[t] = kf.initial_state_mean
blind_state_estimates[t + 1] = (
np.dot(kf.transition_matrices, blind_state_estimates[t])
+ kf.transition_offsets[t]
)
# Estimate the hidden states using observations up to and including
# time t for t in [0...n_timesteps-1]. This method outputs the mean and
# covariance characterizing the Multivariate Normal distribution for
# P(x_t | z_{1:t})
filtered_state_estimates = kf.filter(data.observations)[0]
# Estimate the hidden states using all observations. These estimates
# will be 'smoother' (and are to be preferred) to those produced by
# simply filtering as they are made with later observations in mind.
# Probabilistically, this method produces the mean and covariance
# characterizing,
# P(x_t | z_{1:n_timesteps})
smoothed_state_estimates = kf.smooth(data.observations)[0]
# Draw the true, blind, filtered, and smoothed state estimates for all 5
# dimensions.
pl.figure(figsize=(16, 6))
lines_true = pl.plot(data.states, linestyle='-', color='b')
lines_blind = pl.plot(blind_state_estimates, linestyle=':', color='m')
lines_filt = pl.plot(filtered_state_estimates, linestyle='--', color='g')
lines_smooth = pl.plot(smoothed_state_estimates, linestyle='-.', color='r')
pl.legend(
(lines_true[0], lines_blind[0], lines_filt[0], lines_smooth[0]),
('true', 'blind', 'filtered', 'smoothed')
)
pl.xlabel('time')
pl.ylabel('state')
pl.xlim(xmax=500)
# Draw log likelihood of observations as a function of EM iteration number.
# Notice how it is increasing (this is guaranteed by the EM algorithm)
pl.figure()
pl.plot(loglikelihoods)
pl.xlabel('em iteration number')
pl.ylabel('log likelihood')
pl.show()
|
PierrotLC/pykalman
|
examples/standard/plot_em.py
|
Python
|
bsd-3-clause
| 4,699
|
[
"Gaussian"
] |
4e0f90ab5ba93bdef5bf8406bf89b4088a74a44b8e9d2ab1f21bf5fd80a025ff
|
r"""
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference carefully.
Definition
----------
The 1D scattering intensity for this model is calculated according to
the equations given by Pedersen (Pedersen, 2000), summarised briefly here.
The micelle core is imagined as $N$ = *n_aggreg* polymer heads, each of volume
$V_\text{core}$, which then defines a micelle core of radius $r$ = *r_core*,
which is a separate parameter even though it could be directly determined.
The Gaussian random coil tails, of gyration radius $R_g$, are imagined
uniformly distributed around the spherical core, centred at a distance
$r + d \cdot R_g$ from the micelle centre, where $d$ = *d_penetration* is
of order unity. A volume $V_\text{corona}$ is defined for each coil. The
model in detail seems to separately parameterize the terms for the shape
of $I(Q)$ and the relative intensity of each term, so use with caution
and check parameters for consistency. The spherical core is monodisperse,
so it's intensity and the cross terms may have sharp oscillations (use $q$
resolution smearing if needs be to help remove them).
.. math::
P(q) &= N^2\beta^2_s\Phi(qr)^2 + N\beta^2_cP_c(q)
+ 2N^2\beta_s\beta_cS_{sc}(q) + N(N-1)\beta_c^2S_{cc}(q) \\
\beta_s &= V_\text{core}(\rho_\text{core} - \rho_\text{solvent}) \\
\beta_c &= V_\text{corona}(\rho_\text{corona} - \rho_\text{solvent})
where $\rho_\text{core}$, $\rho_\text{corona}$ and $\rho_\text{solvent}$ are
the scattering length densities *sld_core*, *sld_corona* and *sld_solvent*.
For the spherical core of radius $r$
.. math::
\Phi(qr)= \frac{\sin(qr) - qr\cos(qr)}{(qr)^3}
whilst for the Gaussian coils
.. math::
P_c(q) &= 2 [\exp(-Z) + Z - 1] / Z^2 \\
Z &= (q R_g)^2
The sphere to coil (core to corona) and coil to coil (corona to corona) cross
terms are approximated by:
.. math::
S_{sc}(q) &= \Phi(qr)\psi(Z)
\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \\
S_{cc}(q) &= \psi(Z)^2
\left[\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \right]^2 \\
\psi(Z) &= \frac{1-\exp(-Z)}{Z}
Validation
----------
$P(q)$ above is multiplied by *ndensity*, and a units conversion of $10^{-13}$,
so *scale* is likely 1.0 if the scattering data is in absolute units. This
model has not yet been independently validated.
References
----------
#. J Pedersen, *J. Appl. Cryst.*, 33 (2000) 637-640
Authorship and Verification
----------------------------
* **Translated by :** Richard Heenan **Date:** March 20, 2016
* **Last modified by:** Paul Kienzle **Date:** November 29, 2017
* **Last reviewed by:** Steve King **Date:** November 30, 2017
"""
import numpy as np
from numpy import inf, pi
name = "polymer_micelle"
title = "Polymer micelle model"
description = """
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference to Pedersen and full documentation carefully.
"""
category = "shape:sphere"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [
["ndensity", "1e15/cm^3", 8.94, [0.0, inf], "", "Number density of micelles"],
["v_core", "Ang^3", 62624.0, [0.0, inf], "", "Core volume "],
["v_corona", "Ang^3", 61940.0, [0.0, inf], "", "Corona volume"],
["sld_solvent", "1e-6/Ang^2", 6.4, [0.0, inf], "sld", "Solvent scattering length density"],
["sld_core", "1e-6/Ang^2", 0.34, [0.0, inf], "sld", "Core scattering length density"],
["sld_corona", "1e-6/Ang^2", 0.8, [0.0, inf], "sld", "Corona scattering length density"],
["radius_core", "Ang", 45.0, [0.0, inf], "", "Radius of core ( must be >> rg )"],
["rg", "Ang", 20.0, [0.0, inf], "", "Radius of gyration of chains in corona"],
["d_penetration", "", 1.0, [-inf, inf], "", "Factor to mimic non-penetration of Gaussian chains"],
["n_aggreg", "", 6.0, [-inf, inf], "", "Aggregation number of the micelle"],
]
# pylint: enable=bad-whitespace, line-too-long
single = False
source = ["lib/sas_3j1x_x.c", "polymer_micelle.c"]
def random():
"""Return a random parameter set for the model."""
radius_core = 10**np.random.uniform(1, 3)
rg = radius_core * 10**np.random.uniform(-2, -0.3)
d_penetration = np.random.randn()*0.05 + 1
n_aggreg = np.random.randint(3, 30)
# volume of head groups is the core volume over the number of groups,
# with a correction for packing fraction of the head groups.
v_core = 4*pi/3*radius_core**3/n_aggreg * 0.68
# Rg^2 for gaussian coil is a^2n/6 => a^2 = 6 Rg^2/n
# a=2r => r = Rg sqrt(3/2n)
# v = 4/3 pi r^3 n => v = 4/3 pi Rg^3 (3/2n)^(3/2) n = pi Rg^3 sqrt(6/n)
tail_segments = np.random.randint(6, 30)
v_corona = pi * rg**3 * np.sqrt(6/tail_segments)
V = 4*pi/3*(radius_core + rg)**3
pars = dict(
background=0,
scale=1e7/V,
ndensity=8.94,
v_core=v_core,
v_corona=v_corona,
radius_core=radius_core,
rg=rg,
d_penetration=d_penetration,
n_aggreg=n_aggreg,
)
return pars
tests = [
[{}, 0.01, 15.3532],
]
# RKH 20Mar2016 - need to check whether the core & corona volumes are per
# monomer ??? and how aggregation number works!
# renamed from micelle_spherical_core to polymer_micelle,
# moved from shape-independent to spheres section.
# Ought to be able to add polydisp to core? And add ability to x by S(Q) ?
|
SasView/sasmodels
|
sasmodels/models/polymer_micelle.py
|
Python
|
bsd-3-clause
| 5,987
|
[
"Gaussian"
] |
a95c720023ea8f5249fc196bd1faea38646ed6cef622d6c2b39f097edc580612
|
from ase import *
from hotbit import *
from hotbit import fixpar
import os
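# Set up an SCC-DFTB (hotbit) calculation for a CO molecule, run a
# single-point energy, then write molecular orbital 0 on a real-space grid
# to a Gaussian cube file for visualization.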
calc=Hotbit(SCC=True,width=0.05,txt='test.cal',parameters=fixpar)
atoms=Atoms('CO',positions=[(0,0,0),(1.13,0,0)],pbc=False)
atoms.center(vacuum=3)
atoms.set_calculator(calc)
atoms.get_potential_energy()
wf=calc.get_grid_wf(0,spacing=0.5)
write('wf0.cube',atoms,data=wf)
|
pekkosk/hotbit
|
hotbit/doc/examples/CO_wf.py
|
Python
|
gpl-2.0
| 349
|
[
"ASE"
] |
43cdf27b477e6578d36c3b9625b79b16c4b2e7736e628799f50cab8c497f9dcd
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
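# --- Illustrative sketch (not part of the original file) ---------------------
# Roughly how the pieces above fit together in a training script (the real
# entry point lives in a separate training file); shown only for orientation
# and assuming FLAGS.data_dir and FLAGS.batch_size are configured.
def _example_training_graph():
  global_step = tf.Variable(0, trainable=False, name='example_global_step')
  images, labels = distorted_inputs()    # augmented training batches
  logits = inference(images)             # forward pass
  total_loss = loss(logits, labels)      # cross entropy + weight decay terms
  return train(total_loss, global_step)  # SGD step + moving averages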
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
menghsuann/Tensorflow-Study-Group
|
partial_diff/cifar10.py
|
Python
|
gpl-3.0
| 14,534
|
[
"Gaussian"
] |
a3f06c128a9d599be4fb9da10866c4730f1a23b7bb436e35216226bb51fe1cfd
|
#!/usr/bin/python3
# coding=utf-8
# Point Of No Return
"""
The game file, contains the game and the interface to the players.
@author: anon-42
@version: beta
"""
import tkinter as tk
import numpy as np
import ann
import act_func
import trainer
class Dot:
"""
    Class for the points the player can visit.
"""
def __init__(self, cvs, x, y, state=0):
self.cvs = cvs
self.x = x
self.y = y
self.graphic = self.cvs.create_oval(self.x * 30 + 40,
self.y * 30 + 25,
self.x * 30 + 50,
self.y * 30 + 35)
self.setstate(state)
    def setstate(self, state):  # -1 = already visited, 0 = unoccupied, 1 = currently occupied
"""
Sets the state of Dot.
"""
self.state = state
self.cvs.itemconfigure(self.graphic, fill={-1: 'black',
0: 'white',
1: 'red'}[self.state])
class PONR:
"""
Main class of the game.
"""
def __init__(self, P1, P2, len_x=17, len_y=9, goal=5):
        self.pos = [int(len_x / 2), int(len_y / 2)]  # the player's position
        self.lines_data = [np.zeros((len_x - 1) * len_y),        # horizontal
                           np.zeros(len_x * (len_y - 1)),        # vertical
                           np.zeros((len_x - 1) * (len_y - 1)),  # diagonal top-left to bottom-right
                           np.zeros((len_x - 1) * (len_y - 1))]  # diagonal top-right to bottom-left
self.touched_points = [self.pos]
self.diagonals = []
self.P1 = P1
self.P2 = P2
if len_x % 2 == len_y % 2 == goal % 2 == 1:
self.size = [len_x, len_y]
self.goal = goal
else:
raise ValueError('len_x, len_y and goal must be odd numbers.')
self.root = tk.Tk()
self.root.geometry(str(60 + self.size[0] * 30) + 'x' + str(30 + self.size[1] * 30))
self.root.resizable(width=False, height=False)
self.root.update_idletasks()
self.cvs = tk.Canvas(self.root,
bg='white',
height=self.root.winfo_height(),
width=self.root.winfo_width())
self.cvs.create_rectangle(0,
15 + 30 * (self.size[1] - self.goal) / 2,
20,
15 + 30 * (self.size[1] + self.goal) / 2,
fill='black')
self.cvs.create_rectangle(self.root.winfo_width() - 20,
15 + 30 * (self.size[1] - self.goal) / 2,
self.root.winfo_width(),
15 + 30 * (self.size[1] + self.goal) / 2,
fill='black')
self.cvs.pack()
self.Dots = []
for x in range(self.size[0]):
for y in range(self.size[1]):
self.Dots.append(Dot(self.cvs, x, y))
self.find_Dot(self.pos).setstate(1)
self.root.update_idletasks()
def start(self):
"""
Mainloop of the game.
"""
active, passive = self.P1, self.P2
while True:
for turn_number in range(3):
if self.player_turn(active, turn_number):
continue
else:
for turn_number in range(6):
self.player_turn(passive, turn_number, free_kick=True)
active, passive = passive, active
break
active, passive = passive, active
def find_Dot(self, pos):
"""
Finds a Dot object in self.Dots given its coordinates and returns it.
"""
for Dot in self.Dots:
if Dot.x == pos[0] and Dot.y == pos[1]:
return Dot
def connect(self, pos1, pos2):
"""
Draws a line to connect two dots.
"""
self.cvs.create_line(45 + pos1[0] * 30,
30 + pos1[1] * 30,
45 + pos2[0] * 30,
30 + pos2[1] * 30,
width=3)
def player_turn(self, player, turn_number, free_kick=False):
"""
Executes one player turn.
"""
if not free_kick:
self.root.title(player.name + ' ist am Zug')
else:
self.root.title('Freistoß für ' + player.name)
prev_pos = self.pos
if not free_kick and not self.can_move():
return False
_foo = [0, 0, 0, 0, 0, 0]
_foo[turn_number] = 1
_foo.append(int(free_kick))
step = player.get_input(self.root,
np.append(np.array(_foo), np.concatenate(self.lines_data)))
if player.type == 'com':
Qvalues = list(step.keys())
Qvalues.sort(reverse=True)
while len(Qvalues) > 0:
if self.rules(self.pos, [self.pos[0] + step[Qvalues[0]][0], self.pos[1] + step[Qvalues[0]][1]], free_kick):
step = step[Qvalues[0]]
break
else:
Qvalues = Qvalues[1:]
new_pos = [self.pos[0] + step[0],
self.pos[1] + step[1]]
if turn_number == 5 and new_pos in self.touched_points:
if player == self.P1:
self.win(self.P2)
else:
self.win(self.P1)
elif new_pos[1] in [y for y in range((self.size[1] - self.goal) // 2,
(self.size[1] + self.goal) // 2)] and new_pos[0] == -1:
self.win(self.P2)
elif new_pos[1] in [y for y in range((self.size[1] - self.goal) // 2,
(self.size[1] + self.goal) // 2)] and new_pos[0] == self.size[0]:
self.win(self.P1)
elif self.rules(prev_pos, new_pos, free_kick):
index = (+ min(prev_pos[1], new_pos[1])
* (self.size[0] - 1)
+ min(prev_pos[0], new_pos[0]))
            if step[0] == 0:          # vertical
                self.lines_data[1][index] = 1
            elif step[1] == 0:        # horizontal
                self.lines_data[0][index] = 1
            elif step[0] == step[1]:  # diagonal top-right to bottom-left
                self.lines_data[3][index] = 1
            else:                     # diagonal top-left to bottom-right
                self.lines_data[2][index] = 1
self.pos = new_pos
self.find_Dot(prev_pos).setstate(-1)
self.find_Dot(new_pos).setstate(1)
self.connect(prev_pos, new_pos)
self.touched_points.append(new_pos)
self.diagonals.append([prev_pos, new_pos])
self.root.update_idletasks()
else:
self.player_turn(player, turn_number, free_kick)
return True
def can_move(self):
"""
Checks if the player can move.
"""
for step in [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]:
if self.rules(self.pos, [self.pos[0] + step[0], self.pos[1] + step[1]], False):
return True
if self.pos[0] in [0, self.size[0] - 1] and self.pos[1] in [y for y in range((self.size[1] - self.goal) // 2, (self.size[1] + self.goal) // 2)]:
return True
return False
def rules(self, prev_pos, new_pos, free_kick):
"""
Checks if all rules were followed.
"""
        if (prev_pos[0] != new_pos[0]) and (prev_pos[1] != new_pos[1]):  # the move is a diagonal
            cd = [[new_pos[0], prev_pos[1]], [prev_pos[0], new_pos[1]]]  # cd = corresponding (crossing) diagonal
            if not (cd in self.diagonals or [cd[1], cd[0]] in self.diagonals):  # cd has not been drawn yet
                a = True   # rules satisfied; the diagonal may be drawn
            else:
                a = False  # rules violated; the diagonal would cross an existing one
        else:
            a = True  # rules satisfied, but the move is not a diagonal
        return ((0 <= new_pos[0] < self.size[0] and 0 <= new_pos[1] < self.size[1]) and  # stay on the board
                ((not new_pos in self.touched_points) or free_kick) and  # do not revisit already visited points
                (a or free_kick))  # do not cross already drawn diagonals
def win(self, player):
"""
Final method of the game, is called when one player has won.
"""
self.root.destroy()
win_msg = tk.Tk()
win_msg.title('Ende')
win_msg.geometry('200x100')
tk.Label(win_msg,
text=player.name + ' hat gewonnen!').place(x=100, y=25, anchor=tk.CENTER)
tk.Button(win_msg,
text='Spielverlauf ansehen',
command=self.game_replay).place(x=100, y=50, anchor=tk.CENTER)
tk.Button(win_msg,
text='Schließen',
command=win_msg.destroy).place(x=100, y=80, anchor=tk.CENTER)
win_msg.mainloop()
def game_replay(self):
pass
class Interface:
"""
The game interface to a player (human / AI).
"""
human = 'human'
computer = 'com'
def __init__(self, type, name=None):
if type in ['human', 'com']:
self.type = type
else:
raise TypeError('Interface type must be "human" or "com".')
if type == 'com':
self.net = ann.Neural_Network(name,
543,
(8, act_func.tanh),
[(543, act_func.tanh),
(543, act_func.tanh),
(543, act_func.tanh)],
.0,
readonly=True)
self.net.load('/media/lukas/BA87-AB98/Schule/SFA 17 KNN/Softwareprodukt/trained-ANNs/DATA11.net')
self.name = name if name != None else type
def set_step(self, step):
"""
Sets the internal step variable.
"""
self.step = {'KP_7': [-1, -1],
'KP_8': [0, -1],
'KP_9': [1, -1],
'KP_6': [1, 0],
'KP_3': [1, 1],
'KP_2': [0, 1],
'KP_1': [-1, 1],
'KP_4': [-1, 0]}[step]
self.master.quit()
def get_input(self, master, data):
"""
Gets an input from the player (human or AI).
"""
if self.type == 'human':
self.master = master
for event in ['<KP_7>', '<KP_8>', '<KP_9>', '<KP_6>', '<KP_3>', '<KP_2>', '<KP_1>', '<KP_4>']:
self.master.bind(event, lambda event: self.set_step(event.keysym))
self.master.mainloop()
elif self.type == 'com':
Qvalues = self.net.forward(np.array([data])) # 8 element array
# self.step = [[-1, -1],
# [0, -1],
# [1, -1],
# [1, 0],
# [1, 1],
# [0, 1],
# [-1, 1],
# [-1, 0]][np.argmax(Qvalues)]
self.step = dict(zip(Qvalues.tolist()[0], [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]))
return self.step
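# --- Illustrative sketch (not part of the original file) ---------------------
# Roughly how a match between two human players could be started, assuming a
# working tkinter display; the player names are only examples.
def _example_game():
    p1 = Interface('human', name='Player 1')
    p2 = Interface('human', name='Player 2')
    PONR(p1, p2).start()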
|
anon-42/ANN-PONR-Python3
|
PointOfNoReturn.py
|
Python
|
mit
| 12,028
|
[
"VisIt"
] |
8211d9fe39b238d4ff9e3e0ec005e39150186f469966a79ad6a7368561199a5f
|
# -*- coding: utf-8 -*-
## Copyright (c) 2012, Adrià Cereto-Massagué, Noel O'Boyle
## All rights reserved.
##
## This file is part of Cinfony.
## The contents are covered by the terms of the BSD license
## which is included in the file LICENSE_BSD.txt.
"""
jchem - A Cinfony module for accessing ChemAxon's JChem from CPython and Jython
Global variables:
chemaxon - the underlying JChem Java library
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import os
from glob import glob
if sys.platform[:4] == "java":
classpath = []
if 'JCHEMDIR' in os.environ:
assert os.path.isdir(os.path.join(os.environ['JCHEMDIR'], 'lib'))
for jar in glob(os.path.join(os.path.join(os.environ['JCHEMDIR'],'lib'), '*.jar')):
classpath.append(jar)
if sys.platform[:4] == "java" or sys.platform[:3] == "cli":
import sys
sys.path = classpath + sys.path
import java, javax
import chemaxon
from chemaxon.util import MolHandler
#Exceptions are handled differently in jpype and jython. We need to wrap them:
MolExportException = chemaxon.marvin.io.MolExportException
MolFormatException = chemaxon.formats.MolFormatException
else:
from jpype import *
if not isJVMStarted():
_jvm = os.environ['JPYPE_JVM']
if _jvm[0] == '"': # Remove trailing quotes
_jvm = _jvm[1:-1]
_cp = os.pathsep.join(os.environ.get('CLASSPATH', '').split(os.pathsep))
startJVM(_jvm, "-Djava.class.path=" + _cp)
chemaxon = JPackage("chemaxon")
MolHandler = chemaxon.util.MolHandler
try:
_testmol = MolHandler()
except TypeError:
raise ImportError, "jchem.jar file cannot be found."
# Exception wrappers for JPype
MolExportException = JavaException
MolFormatException = JavaException
_descset = set(['HAcc', 'HDon', 'Heavy', 'LogD', 'LogP', 'Mass', 'TPSA'])
_descset.update(dir(chemaxon.descriptors.scalars))
descs = [cls for cls in _descset if hasattr(getattr(chemaxon.descriptors.scalars, cls),'generate') and cls != 'LogD'] + ['RotatableBondsCount']
"""A list of supported descriptors"""
fps = ['ecfp']
"""A list of supported fingerprint types"""
forcefields = ["mmff94"]
"""A list of supported forcefields"""
informats = {
'smi': "SMILES"
,'cxsmi': "ChemAxon extended SMILES"
,'mol': "MDL MOL"
,'sdf': "MDL SDF"
,'inchi': "InChI"
,'cml': "Chemical Markup Language"
, 'mrv':'Marvin Documents'
, 'skc':'ISIS/Draw sketch file'
, 'cdx':'ChemDraw sketch file'
, 'cdxml':'ChemDraw sketch file'
, "name":"Common name"
, "peptide":"Aminoacid sequence"
, "sybyl":"Tripos SYBYL"
, "pdb":"PDB"
, "xyz":"XYZ"
, 'cube':'Gaussian cube'
, 'gout':'Gaussian output format'
}
"""A dictionary of supported input formats"""
outformats = {
'smi': "SMILES"
,'cxsmi': "ChemAxon extended SMILES"
,'mol': "MDL MOL"
,'sdf': "MDL SDF"
,'inchi': "InChI"
,'inchikey': "InChIKey"
,'cml': "CML"
, 'mrv':'Marvin Documents'
, 'skc':'ISIS/Draw sketch file'
, 'cdx':'ChemDraw sketch file'
, 'cdxml':'ChemDraw sketch file'
, "name":"Common name"
, "peptide":"Aminoacid sequence"
, "sybyl":"Tripos SYBYL"
, "pdb":"PDB"
, "xyz":"XYZ"
, 'cube':'Gaussian cube'
, 'gjf':'Gaussian input format'
}
"""A dictionary of supported output formats"""
def readfile(format, filename):
"""Iterate over the molecules in a file.
Required parameters:
format - Ignored, but needed for compatibility with other cinfony
modules and also good for readability
filename
You can access the first molecule in a file using the next() method
of the iterator:
mol = readfile("smi", "myfile.smi").next()
You can make a list of the molecules in a file using:
mols = list(readfile("smi", "myfile.smi"))
You can iterate over the molecules in a file as shown in the
following code snippet:
>>> atomtotal = 0
>>> for mol in readfile("sdf", "head.sdf"):
... atomtotal += len(mol.atoms)
...
>>> print atomtotal
43
"""
if not os.path.isfile(filename):
raise IOError, "No such file: '%s'" % filename
    if format not in informats:
raise ValueError("%s is not a recognised JChem format" % format)
try:
mi = chemaxon.formats.MolImporter(filename)
mol = mi.read()
while mol:
mol.aromatize()
yield Molecule(mol)
mol = mi.read()
except chemaxon.formats.MolFormatException:
raise ValueError("%s is not a recognised JChem format" % format)
def readstring(format, string):
"""Read in a molecule from a string.
Required parameters:
format - Ignored, but needed for compatibility with other cinfony
modules and also good for readability
string
Example:
>>> input = "C1=CC=CS1"
>>> mymol = readstring("smi", input)
>>> len(mymol.atoms)
5
"""
format = format.lower()
if format not in informats:
raise ValueError("%s is not a recognised JChem format" % format)
try:
mh = MolHandler(string)
return Molecule(mh.molecule)
except MolFormatException, ex:
if sys.platform[:4] != "java":
#Jpype exception
ex = ex.message()
raise IOError, ex
else:
raise IOError("Problem reading the supplied string")
class Outputfile(object):
"""Represent a file to which *output* is to be sent.
Required parameters:
format - see the outformats variable for a list of available
output formats
filename
Optional parameters:
       overwrite -- if the output file already exists, should it
be overwritten? (default is False)
Methods:
write(molecule)
close()
"""
def __init__(self, format, filename, overwrite=False):
if ':' in format:
format, options = format.split(':')
if options:
options = ':' + options
else:
options = ''
self.format = format.lower()
self.filename = filename
if not overwrite and os.path.isfile(self.filename):
raise IOError, "%s already exists. Use 'overwrite=True' to overwrite it." % self.filename
if format in ("smi", 'cxsmi'):
if not options:
options = ':a-H'
try:
self._writer = chemaxon.formats.MolExporter(filename, format + options)
except MolExportException, e:
raise ValueError(e)
self.total = 0 # The total number of molecules written to the file
def write(self, molecule):
"""Write a molecule to the output file.
Required parameters:
molecule
"""
if not self.filename:
raise IOError, "Outputfile instance is closed."
self._writer.write(molecule.Molecule)
self.total += 1
def close(self):
"""Close the Outputfile to further writing."""
self.filename = None
self._writer.close()
class Molecule(object):
"""Represent a JChem Molecule.
Required parameters:
Molecule -- a JChem Molecule or any type of cinfony Molecule
Attributes:
atoms, data, exactmass, formula, molwt, title
Methods:
addh(), calcfp(), calcdesc(), draw(), removeh(), write()
The underlying JChem Molecule can be accessed using the attribute:
Molecule
The associated JChem MolHandler can be accessed using the attribute:
MolHandler
"""
_cinfony = True
def __init__(self, Molecule):
if hasattr(Molecule, "_cinfony"):
a, b = Molecule._exchange
if a == 0:
mol = readstring("smi", b)
else:
mol = readstring("sdf", b)
Molecule = mol.Molecule
self.Molecule = Molecule
self.MolHandler = chemaxon.util.MolHandler(self.Molecule)
self.MolHandler.aromatize()
@property
def atoms(self): return [Atom(atom) for atom in self.Molecule.atomArray]
@property
def data(self): return MoleculeData(self)
@property
def formula(self): return self.MolHandler.calcMolFormula()
@property
def exactmass(self):
return self.MolHandler.calcMolWeightInDouble()
@property
def molwt(self):
return self.MolHandler.calcMolWeight()
def _gettitle(self): return self.Molecule.getName()
def _settitle(self, val): self.Molecule.setName(val)
title = property(_gettitle, _settitle)
@property
def _exchange(self):
if self.Molecule.dim > 1:
return (1, self.write("mol"))
else:
return (0, self.write("smi"))
def __iter__(self):
"""Iterate over the Atoms of the Molecule.
This allows constructions such as the following:
for atom in mymol:
print atom
"""
return iter(self.atoms)
def __str__(self):
return self.write()
def addh(self):
"""Add hydrogens."""
self.MolHandler.addHydrogens()
def removeh(self):
"""Remove hydrogens."""
self.MolHandler.removeHydrogens()
def write(self, format="smi", filename=None, overwrite=False):
"""Write the molecule to a file or return a string.
Optional parameters:
format -- see the informats variable for a list of available
output formats (default is "smi")
filename -- default is None
           overwrite -- if the output file already exists, should it
be overwritten? (default is False)
If a filename is specified, the result is written to a file.
Otherwise, a string is returned containing the result.
To write multiple molecules to the same file you should use
the Outputfile class.
"""
if ':' in format:
format, options = format.split(':')
if options:
options = ':' + options
else:
options = ''
format = format.lower()
if format not in outformats:
raise ValueError("%s is not a recognised format" % format)
if filename is not None and not overwrite and os.path.isfile(filename):
raise IOError, "%s already exists. Use 'overwrite=True' to overwrite it." % filename
if format in ("smi", 'cxsmi'):
if not options:
options = ':a-H'
out = chemaxon.formats.MolExporter.exportToFormat(self.Molecule,format +'les' + options)
elif format == 'inchikey':
out = chemaxon.formats.MolExporter.exportToFormat(self.Molecule,'inchikey').replace('InChIKey=', '')
else:
out = chemaxon.formats.MolExporter.exportToFormat(self.Molecule,format + options)
if format == 'inchi':
out = out.split('AuxInfo=')[0]
if filename:
output = open(filename, "w")
print >> output, out
output.close()
return
else:
return out
def calcfp(self, fp="ecfp"):
"""Calculate a molecular fingerprint.
Optional parameters:
           fp -- the fingerprint type (default is "ecfp"). See the
                 fps variable for a list of available fingerprint
                 types.
"""
fp = fp.lower()
if fp in fps:
if fp == 'ecfp':
fp = chemaxon.descriptors.ECFP(ECFPConfiguration)
fp.generate(self.Molecule)
else:
raise ValueError, "%s is not a recognised fingerprint type" % fp
return Fingerprint(fp)
def calcdesc(self, descnames=[]):
"""Calculate descriptor values.
Optional parameter:
descnames -- a list of names of descriptors
If descnames is not specified, all available descriptors are
calculated. See the descs variable for a list of available
descriptors.
"""
if not descnames:
descnames = descs
ans = {}
for descname in descnames:
if descname not in descs:
raise ValueError, "%s is not a recognised descriptor type" % descname
if descname == 'RotatableBondsCount':
ta = chemaxon.calculations.TopologyAnalyser()
ta.setMolecule(self.Molecule)
ans[descname] = ta.rotatableBondCount()
else:
desc = getattr(chemaxon.descriptors.scalars, descname)('')
desc.generate(self.Molecule)
ans[descname] = desc.toFloatArray()[0]
return ans
def make3D(self):
"""Generate 3D coordinates.
Hydrogens are added, and a low energy conformer is found
using the MMFF94 forcefield.
"""
self.addh()
cp = chemaxon.marvin.calculations.ConformerPlugin()
cp.setMolecule(self.Molecule)
cp.setLowestEnergyConformerCalculation(True)
cp.setMMFF94Optimization(True)
success = cp.run()
optmol = cp.getMMFF94OptimizedStrucutre()
self.Molecule = optmol
self.MolHandler = chemaxon.util.MolHandler(self.Molecule)
self.MolHandler.aromatize()
def draw(self, show=True, filename=None, update=False,
usecoords=False):
"""Create a 2D depiction of the molecule.
"""
if not usecoords:
molecule = self.Molecule.clone()
molecule.setDim(0)
else:
molecule = self.Molecule
if update:
myMolecule = readstring("mol", Molecule(molecule).write("mol"))
self.Molecule = myMolecule.Molecule
self.MolHandler = myMolecule.MolHandler
bytearray = chemaxon.formats.MolExporter.exportToBinFormat(molecule, 'png')
if filename:
of = java.io.FileOutputStream(filename)
of.write(bytearray)
of.close()
if show:
source = java.io.ByteArrayInputStream(bytearray)
reader = javax.imageio.ImageIO.getImageReadersByFormatName('png').next()
iis = javax.imageio.ImageIO.createImageInputStream(source)
reader.setInput(iis, True)
param = reader.getDefaultReadParam()
image = reader.read(0, param)
frame = javax.swing.JFrame()
imageIcon = javax.swing.ImageIcon(image)
label = javax.swing.JLabel()
label.setIcon(imageIcon)
frame.getContentPane().add(label, java.awt.BorderLayout.CENTER)
frame.pack()
frame.setVisible(True)
frame.show()
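# --- Illustrative sketch (not part of the original module) -------------------
# A small example of how calcdesc() and calcfp() might be used together,
# assuming a working JChem installation; the SMILES string and descriptor
# names are arbitrary examples.
def _example_descriptors():
    mol = readstring("smi", "c1ccccc1O")   # phenol
    print mol.calcdesc(["Mass", "LogP"])   # a dict of descriptor values
    fp = mol.calcfp("ecfp")                # ECFP fingerprint wrapper
    print fp.bits[:5]                      # first few set bits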
class Fingerprint(object):
"""A Molecular Fingerprint.
Required parameters:
fingerprint -- a vector calculated by one of the fingerprint methods
Attributes:
fp -- the underlying fingerprint object
bits -- a list of bits set in the Fingerprint
Methods:
The "|" operator can be used to calculate the Tanimoto coeff. For example,
given two Fingerprints 'a', and 'b', the Tanimoto coefficient is given by:
tanimoto = a | b
"""
def __init__(self, fingerprint):
self.fp = fingerprint
def __or__(self, other):
return 1 - self.fp.getTanimoto(other.fp)
def __getattr__(self, attr):
if attr == "bits":
# Create a bits attribute on-the-fly
bs = self.fp.toBitSet()
bits = [-1]
while True:
setbit = bs.nextSetBit(bits[-1] + 1)
if setbit == -1:
break
bits.append(setbit)
return bits[1:] # Leave out the initial '-1'
else:
raise AttributeError, "Fingerprint has no attribute %s" % attr
def __str__(self):
return ", ".join([str(x) for x in self.fp.toIntArray()])
class Atom(object):
"""Represent an Atom.
Required parameters:
Atom -- a JChem Atom
Attributes:
atomicnum, coords, formalcharge
The original JChem Atom can be accessed using the attribute:
Atom
"""
def __init__(self, Atom):
self.Atom = Atom
@property
def atomicnum(self): return self.Atom.getAtno()
@property
def coords(self):
return (self.Atom.x, self.Atom.y, self.Atom.z)
@property
def formalcharge(self):
return self.Atom.charge
def __str__(self):
c = self.coords
return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, c[0], c[1], c[2])
class Smarts(object):
"""A Smarts Pattern Matcher
Required parameters:
smartspattern
Methods:
findall()
Example:
>>> mol = readstring("smi","CCN(CC)CC") # triethylamine
>>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
>>> print smarts.findall(mol)
[(1, 2), (4, 5), (6, 7)]
"""
def __init__(self, smartspattern):
"""Initialise with a SMARTS pattern."""
self.search = chemaxon.sss.search.MolSearch()
smarts = MolHandler(smartspattern)
smarts.setQueryMode(True)
smarts.aromatize()
self.search.setQuery(smarts.molecule)
def findall(self, molecule):
"""Find all matches of the SMARTS pattern to a particular molecule.
Required parameters:
molecule
"""
self.search.setTarget(molecule.Molecule)
match = self.search.findAll()
result = []
for i in xrange(len(match)):
result.append(tuple([n+1 for n in match[i]]))
return result
class MoleculeData(object):
"""Store molecule data in a dictionary-type object
Required parameters:
Molecule -- a JChem Molecule
Methods and accessor methods are like those of a dictionary except
that the data is retrieved on-the-fly from the underlying Molecule.
Example:
>>> mol = readfile("sdf", 'head.sdf').next()
>>> data = mol.data
>>> print data
{'Comment': 'CORINA 2.61 0041 25.10.2001', 'NSC': '1'}
>>> print len(data), data.keys(), data.has_key("NSC")
2 ['Comment', 'NSC'] True
>>> print data['Comment']
CORINA 2.61 0041 25.10.2001
>>> data['Comment'] = 'This is a new comment'
>>> for k,v in data.iteritems():
... print k, "-->", v
Comment --> This is a new comment
NSC --> 1
>>> del data['NSC']
>>> print len(data), data.keys(), data.has_key("NSC")
1 ['Comment'] False
"""
def __init__(self, Molecule):
self._data = Molecule.Molecule.properties()
def _testforkey(self, key):
if not key in self:
raise KeyError, "'%s'" % key
def keys(self):
return list(self._data.keys)
def values(self):
return [self[k] for k in self._data.keys]
def items(self):
return [(k, self[k]) for k in self._data.keys]
def __iter__(self):
return iter(self.keys())
def iteritems(self):
return iter(self.items())
def __len__(self):
return len(self._data.keys)
def __contains__(self, key):
return key in self.keys()
def __delitem__(self, key):
self._testforkey(key)
self._data.setString(key, None)
def clear(self):
for key in self:
del self[key]
def has_key(self, key):
return key in self
def update(self, dictionary):
for k, v in dictionary.iteritems():
self[k] = v
def __getitem__(self, key):
self._testforkey(key)
return self._data.get(key).propValue
def __setitem__(self, key, value):
self._data.setString(key, str(value))
def __repr__(self):
return dict(self.iteritems()).__repr__()
ECFPConfiguration = """<?xml version="1.0" encoding="UTF-8"?>
<ECFPConfiguration Version="0.1">
<Parameters Length="1024" Diameter="4" Counts="no"/>
<IdentifierConfiguration>
<!-- Default atom properties (switched on by Value=1) -->
<Property Name="AtomicNumber" Value="1"/>
<Property Name="HeavyNeighborCount" Value="1"/>
<Property Name="HCount" Value="1"/>
<Property Name="FormalCharge" Value="1"/>
<Property Name="IsRingAtom" Value="1"/>
<!-- Other built-in atom properties (switched off by Value=0) -->
<Property Name="ConnectionCount" Value="0"/>
<Property Name="Valence" Value="0"/>
<Property Name="Mass" Value="0"/>
<Property Name="MassNumber" Value="0"/>
<Property Name="HasAromaticBond" Value="0"/>
<Property Name="IsTerminalAtom" Value="0"/>
<Property Name="IsStereoAtom" Value="0"/>
</IdentifierConfiguration>
<StandardizerConfiguration Version="0.1">
<Actions>
<Action ID="aromatize" Act="aromatize"/>
<RemoveExplicitH ID="RemoveExplicitH" Groups="target"/>
</Actions>
</StandardizerConfiguration>
<ScreeningConfiguration>
<ParametrizedMetrics>
<ParametrizedMetric Name="Tanimoto" ActiveFamily="Generic" Metric="Tanimoto" Threshold="0.5"/>
<ParametrizedMetric Name="Euclidean" ActiveFamily="Generic" Metric="Euclidean" Threshold="10"/>
</ParametrizedMetrics>
</ScreeningConfiguration>
</ECFPConfiguration>
"""
if __name__=="__main__": #pragma: no cover
mol = readstring("smi", "CC(=O)Cl")
mol.title = u"Adrià"
mol.draw()
for mol in readfile("sdf", "head.sdf"):
pass
|
cinfony/cinfony
|
cinfony/jchem.py
|
Python
|
bsd-2-clause
| 21,832
|
[
"Gaussian"
] |
9776a61cc5949909335424856b84f416ba16fb8270960ec74ce7addc34e0ae96
|
import numpy as np
import theano
import theano.tensor as T
import unittest
import tempfile
from numpy.testing import assert_array_equal
from smartlearner import views, stopping_criteria, Trainer, tasks
from smartlearner.direction_modifiers import DecreasingLearningRate
from smartlearner.optimizers import SGD
from smartlearner.testing import DummyLoss, DummyBatchScheduler
from smartlearner.utils import sharedX
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
def __init__(self, cost, param):
super().__init__()
self.cost = cost
self.param = param
def _get_gradients(self):
gparam = T.grad(cost=self.cost, wrt=self.param)
return {self.param: gparam}
class TestDecreasingLearningRate(unittest.TestCase):
def _build_experiment(self):
        # Create an N-d Gaussian function to optimize. This function is not
        # well-conditioned, so there is no perfect gradient step that converges
        # in only one iteration.
N = 4
center = 5*np.ones((1, N)).astype(floatX)
param = sharedX(np.zeros((1, N)))
cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
loss = DummyLossWithGradient(cost, param)
optimizer = SGD(loss)
direction_modifier = DecreasingLearningRate(lr=self.lr, dc=self.dc)
optimizer.append_direction_modifier(direction_modifier)
trainer = Trainer(optimizer, DummyBatchScheduler())
# Monitor the learning rate.
logger = tasks.Logger(views.MonitorVariable(list(direction_modifier.parameters.values())[0]))
trainer.append_task(logger)
return trainer, logger, direction_modifier
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lr = 1
self.dc = 0.5
self.max_epoch = 10
self.trainer, self.logger, self.direction_modifier = self._build_experiment()
self.trainer.append_task(stopping_criteria.MaxEpochStopping(self.max_epoch))
self.trainer.train()
def test_behaviour(self):
learning_rate_per_update = np.array(self.logger.get_variable_history(0))[:, :, 0].flatten()
expected_learning_rate_per_update = [self.lr * self.dc**i for i in range(self.max_epoch)]
assert_array_equal(learning_rate_per_update, expected_learning_rate_per_update)
def test_save_load(self):
# Save training and resume it.
with tempfile.TemporaryDirectory() as experiment_dir:
# Save current training state of the experiment.
self.trainer.save(experiment_dir)
# Load previous training state of the experiment.
trainer, logger, direction_modifier = self._build_experiment()
trainer.load(experiment_dir)
# Check the state of the direction modifier.
for key in direction_modifier.parameters:
assert_array_equal(direction_modifier.parameters[key].get_value(),
self.direction_modifier.parameters[key].get_value())
def test_resume(self):
trainer1, logger1, direction_modifier1 = self._build_experiment()
trainer1.append_task(stopping_criteria.MaxEpochStopping(5))
trainer1.train()
# Save training and resume it.
with tempfile.TemporaryDirectory() as experiment_dir:
# Save current training state of the experiment.
trainer1.save(experiment_dir)
# Load previous training state of the experiment.
trainer2, logger2, direction_modifier2 = self._build_experiment()
trainer2.append_task(stopping_criteria.MaxEpochStopping(10))
trainer2.load(experiment_dir)
trainer2.train()
# Check that concatenating `logger1` with `logger2` is the same as `self.logger`.
learning_rate_per_update_part1 = np.array(logger1.get_variable_history(0))[:, :, 0].flatten()
learning_rate_per_update_part2 = np.array(logger2.get_variable_history(0))[:, :, 0].flatten()
expected_learning_rate_per_update = np.array(self.logger.get_variable_history(0))[:, :, 0].flatten()
assert_array_equal(np.r_[learning_rate_per_update_part1, learning_rate_per_update_part2],
expected_learning_rate_per_update)
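# --- Illustrative sketch (not part of the original tests) --------------------
# The schedule the tests above compare against is plain geometric decay:
# lr=1, dc=0.5 over 4 epochs gives [1.0, 0.5, 0.25, 0.125].
def _example_schedule(lr=1.0, dc=0.5, n_epochs=4):
    return [lr * dc ** i for i in range(n_epochs)]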
|
ASalvail/smartlearner
|
tests/direction_modifiers/test_decreasing_learning_rate.py
|
Python
|
bsd-3-clause
| 4,346
|
[
"Gaussian"
] |
a632bbe68981a5b0af09e1c80d224ab3832950039f6fe7079818af8a1d8965f9
|
# -*- coding: utf-8 -*-
# pylint: disable=E1103,C0103
"""
Copyright 2013-2014 Olivier Cortès <oc@1flow.io>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from constance import config
from django.core import mail
from django.test import TestCase # TransactionTestCase
from django.test.utils import override_settings
from django.contrib.auth import get_user_model
from oneflow.core.models import (Feed, Subscription, PseudoQuerySet,
Article, Read, Folder, TreeCycleException,
User, Group, Tag, WebSite, Author)
from oneflow.core.tasks import global_feeds_checker
from oneflow.base.utils import RedisStatsCounter
from oneflow.base.tests import (connect_mongodb_testsuite, TEST_REDIS)
DjangoUser = get_user_model()
LOGGER = logging.getLogger(__file__)
# Use the test database not to pollute the production/development one.
RedisStatsCounter.REDIS = TEST_REDIS
TEST_REDIS.flushdb()
connect_mongodb_testsuite()
# Empty the database before starting in case an old test failed to tearDown().
Article.drop_collection()
Read.drop_collection()
User.drop_collection()
Group.drop_collection()
Feed.drop_collection()
Tag.drop_collection()
Folder.drop_collection()
WebSite.drop_collection()
Author.drop_collection()
class ThrottleIntervalTest(TestCase):
def test_lower_interval_with_etag_or_modified(self):
t = Feed.throttle_fetch_interval
some_news = 10
no_dupe = 0
no_mutual = 0
self.assertEquals(t(1000, some_news, no_mutual, no_dupe,
'etag', 'last_modified'), 540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe,
'', 'last_modified'), 540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe,
None, 'last_modified'), 540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe, 'etag', ''),
540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe, 'etag', None),
540.0)
def test_lower_interval_with_etag_or_modified_and_mutualized(self):
t = Feed.throttle_fetch_interval
no_news = 0
no_dupe = 0
a_dupe = 1
a_mutual = 1
self.assertEquals(t(1000, no_news, a_mutual, no_dupe,
'etag', 'last_modified'), 800.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe,
'', 'last_modified'), 800.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe,
None, 'last_modified'), 800.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe, 'etag', ''),
800.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe, 'etag', None),
800.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe,
'etag', 'last_modified'), 900.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe,
'', 'last_modified'), 900.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe,
None, 'last_modified'), 900.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe, 'etag', ''),
900.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe, 'etag', None),
900.0)
def test_raise_interval_with_etag_or_modified(self):
t = Feed.throttle_fetch_interval
some_news = 10
no_news = 0
a_dupe = 1
no_mutual = 0
# news, but a dupe > raise-
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
'etag', 'last_modified'), 810.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
'', 'last_modified'), 810.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
None, 'last_modified'), 810.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
'etag', ''), 810.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
'etag', None), 810.0)
# no news, a dupe > raise+
self.assertEquals(t(1000, no_news, no_mutual, a_dupe,
'etag', 'last_modified'), 1250)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe,
'', 'last_modified'), 1250)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe,
None, 'last_modified'), 1250)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe,
'etag', ''), 1250)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe,
'etag', None), 1250)
def test_lowering_interval_without_etag_nor_modified(self):
t = Feed.throttle_fetch_interval
some_news = 10
no_dupe = 0
no_mutual = 0
a_mutual = 1
# news, no dupes > raise+ (etag don't count)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe, '', ''),
540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe, None, None),
540.0)
self.assertEquals(t(1000, some_news, a_mutual, no_dupe, '', ''),
630.0)
self.assertEquals(t(1000, some_news, a_mutual, no_dupe, None, None),
630.0)
def test_raising_interval_without_etag_nor_modified(self):
t = Feed.throttle_fetch_interval
some_news = 10
no_news = 0
a_dupe = 1
no_dupe = 0
no_mutual = 0
a_mutual = 1
self.assertEquals(t(1000, some_news, no_mutual, no_dupe, '', ''),
540.0)
self.assertEquals(t(1000, some_news, no_mutual, no_dupe,
None, None), 540.0)
self.assertEquals(t(1000, some_news, a_mutual, no_dupe, '', ''),
630.0)
self.assertEquals(t(1000, some_news, a_mutual, no_dupe,
None, None), 630.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe, '', ''),
810.0)
self.assertEquals(t(1000, some_news, no_mutual, a_dupe,
None, None), 810.0)
self.assertEquals(t(1000, some_news, a_mutual, a_dupe, '', ''),
720.0000000000001)
self.assertEquals(t(1000, some_news, a_mutual, a_dupe,
None, None), 720.0000000000001)
self.assertEquals(t(1000, no_news, no_mutual, no_dupe,
'', ''), 1000.0)
self.assertEquals(t(1000, no_news, no_mutual, no_dupe,
None, None), 1000.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe,
'', ''), 800.0)
self.assertEquals(t(1000, no_news, a_mutual, no_dupe,
None, None), 800.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe, '', ''), 900.0)
self.assertEquals(t(1000, no_news, a_mutual, a_dupe, None, None), 900.0)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe, '', ''), 1125)
self.assertEquals(t(1000, no_news, no_mutual, a_dupe, None, None), 1125)
def test_less_news(self):
t = Feed.throttle_fetch_interval
more_news = config.FEED_FETCH_RAISE_THRESHOLD + 5
less_news = config.FEED_FETCH_RAISE_THRESHOLD - 5
just_one = 1
a_dupe = 1
no_dupe = 0
no_mutual = 0
self.assertEquals(t(1000, just_one, no_mutual, a_dupe,
'etag', ''), 900.0)
self.assertEquals(t(1000, less_news, no_mutual, a_dupe,
'etag', None), 900.0)
self.assertEquals(t(1000, more_news, no_mutual, a_dupe,
'etag', None), 810.0)
self.assertEquals(t(1000, just_one, no_mutual, no_dupe,
'etag', ''), 600.0)
self.assertEquals(t(1000, less_news, no_mutual, no_dupe,
'etag', None), 600.0)
self.assertEquals(t(1000, more_news, no_mutual, no_dupe,
'etag', None), 540.0)
def test_limits(self):
t = Feed.throttle_fetch_interval
some_news = 10
no_news = 0
a_dupe = 1
no_dupe = 0
no_mutual = 0
# new articles already at max stay at max.
self.assertEquals(t(config.FEED_FETCH_MAX_INTERVAL, no_news,
no_mutual, a_dupe, '', ''),
config.FEED_FETCH_MAX_INTERVAL)
self.assertEquals(t(config.FEED_FETCH_MAX_INTERVAL, no_news,
no_mutual, a_dupe, 'etag', ''),
config.FEED_FETCH_MAX_INTERVAL)
self.assertEquals(t(config.FEED_FETCH_MAX_INTERVAL, no_news,
no_mutual, a_dupe, None, 'last_mod'),
config.FEED_FETCH_MAX_INTERVAL)
# dupes at min stays at min
self.assertEquals(t(config.FEED_FETCH_MIN_INTERVAL, some_news,
no_mutual, no_dupe, '', ''),
config.FEED_FETCH_MIN_INTERVAL)
self.assertEquals(t(config.FEED_FETCH_MIN_INTERVAL, some_news,
no_mutual, no_dupe, 'etag', None),
config.FEED_FETCH_MIN_INTERVAL)
self.assertEquals(t(config.FEED_FETCH_MIN_INTERVAL, some_news,
no_mutual, no_dupe, '', 'last_mod'),
config.FEED_FETCH_MIN_INTERVAL)
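# Note (not part of the original tests): judging from the calls above, the
# helper under test takes
#     Feed.throttle_fetch_interval(current_interval, new_articles, mutualized,
#                                  duplicates, etag, last_modified)
# and returns an adjusted fetch interval: it is lowered when fresh articles
# arrive and raised (up to FEED_FETCH_MAX_INTERVAL) when only duplicates show up.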
class PseudoQuerySetTest(TestCase):
def test_append(self):
q1 = PseudoQuerySet()
q1.append(1)
q1.append(2)
q1.append(3)
self.assertTrue(len(q1) == 3)
self.assertTrue(1 in q1)
self.assertTrue(2 in q1)
self.assertTrue(3 in q1)
def test_extend(self):
q1 = PseudoQuerySet()
q1.append(1)
q1.append(2)
q1.append(3)
q2 = PseudoQuerySet()
q2.append(4)
q2.append(5)
q2.append(6)
#LOGGER.warning(q1)
q1.extend(q2)
#LOGGER.warning(q1)
self.assertTrue(len(q1) == 6)
self.assertTrue(4 in q1)
self.assertTrue(5 in q1)
self.assertTrue(6 in q1)
def test_append_extend(self):
q1 = PseudoQuerySet()
q1.append(1)
q1.append(2)
q2 = PseudoQuerySet()
q2.append(4)
q2.append(5)
q3 = PseudoQuerySet()
q3.append(7)
q3.append(8)
#LOGGER.warning(q1)
q1.append(3)
q1.extend(q2)
q1.append(6)
q1.extend(q3)
q1.append(9)
#LOGGER.warning(q1)
self.assertTrue(len(q1) == 9)
self.assertTrue(4 in q1)
self.assertTrue(6 in q1)
self.assertTrue(8 in q1)
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class FeedsTest(TestCase):
def setUp(self):
# NOTE: we need real web pages, else the absolutization won't work or
# will find duplicates and tests will fail for a real-life reason.
self.article1 = Article(title='test1',
url='http://blog.1flow.io/post/'
'59410536612/1flow-blog-has-moved').save()
self.feed = Feed(name='1flow test feed',
url='http://blog.1flow.io/rss').save()
self.article1.update(add_to_set__feeds=self.feed)
self.article1.reload()
# User & Reads creation
for index in xrange(1, 2):
username = 'test_user_%s' % index
du = DjangoUser.objects.create(username=username,
email='%s@test.1flow.io' % username)
# PG post_save() signal already created the MongoDB user.
u = du.mongo
Read(user=u, article=self.article1).save()
Subscription(user=u, feed=self.feed).save()
for index in xrange(2, 5):
username = 'test_user_%s' % index
du = DjangoUser.objects.create(username=username,
email='%s@test.1flow.io' % username)
def tearDown(self):
Subscription.drop_collection()
Feed.drop_collection()
Read.drop_collection()
Article.drop_collection()
User.drop_collection()
def test_close(self):
closed_reason = u'closed for tests'
self.feed.close(closed_reason)
self.assertTrue(self.feed.closed)
self.assertEquals(self.feed.closed_reason, closed_reason)
self.assertFalse(self.feed.date_closed is None)
global_feeds_checker()
self.assertEquals(len(mail.outbox), 1)
self.assertTrue(u'Reminder: 1 feed(s) closed in last'
in mail.outbox[0].subject)
self.assertTrue(unicode(self.feed) in mail.outbox[0].body)
#self.assertEqual( mail.outbox[0].to, [ "test@foo.bar" ] )
#self.assertTrue( "test@foo.bar" in mail.outbox[0].to )
def test_feeds_creation(self):
# .setUp() creates one already.
self.assertEquals(Feed._get_collection().count(), 1)
feed, created = Feed.create_feeds_from_url(u'http://ntoll.org/')[0]
self.assertTrue(created)
self.assertEquals(feed.url, u'http://ntoll.org/rss.xml')
self.assertEquals(Feed._get_collection().count(), 2)
# Via the Home Page
feed, created = Feed.create_feeds_from_url(u'http://www.zdnet.fr/')[0]
self.assertTrue(created)
self.assertEquals(feed.url, u'http://www.zdnet.fr/feeds/rss/')
self.assertEquals(Feed._get_collection().count(), 3)
# Via the RSS listing page
feed, created = Feed.create_feeds_from_url(u'http://www.zdnet.fr/services/rss/')[0] # NOQA
self.assertFalse(created)
self.assertEquals(feed.url, u'http://www.zdnet.fr/feeds/rss/')
self.assertEquals(Feed._get_collection().count(), 3)
# Via the first RSS (raw)
feed, created = Feed.create_feeds_from_url(u'http://www.zdnet.fr/feeds/rss/')[0] # NOQA
self.assertFalse(created)
self.assertEquals(feed.url, u'http://www.zdnet.fr/feeds/rss/')
self.assertEquals(Feed._get_collection().count(), 3)
feed, created = Feed.create_feeds_from_url(u'http://www.atlantico.fr/')[0] # NOQA
self.assertTrue(created)
self.assertEquals(feed.url, u'http://www.atlantico.fr/rss.xml')
self.assertEquals(Feed._get_collection().count(), 4)
feed, created = Feed.create_feeds_from_url(u'http://wordpress.org/')[0]
self.assertTrue(created)
self.assertEquals(feed.url, u'http://wordpress.org/news/feed/')
self.assertEquals(Feed._get_collection().count(), 5)
# Not created again, even from an article which has the comment feed.
feed, created = Feed.create_feeds_from_url(u'http://ntoll.org/article/build-a-drogulus')[0] # NOQA
self.assertFalse(created)
self.assertEquals(feed.url, u'http://ntoll.org/rss.xml')
self.assertEquals(Feed._get_collection().count(), 5)
# This one has been created in .setUp()
feed, created = Feed.create_feeds_from_url(u'http://blog.1flow.io/')[0]
self.assertFalse(created)
self.assertEquals(feed.url, u'http://blog.1flow.io/rss')
self.assertEquals(Feed._get_collection().count(), 5)
# No RSS in main page
self.assertRaises(Exception, Feed.create_feeds_from_url,
u'http://www.bbc.co.uk/')
self.assertEquals(Feed._get_collection().count(), 5)
# This one has no RSS anywhere, it won't create anything
self.assertRaises(Exception, Feed.create_feeds_from_url,
u'http://www.tumblr.com/blog/1flowio')
self.assertEquals(Feed._get_collection().count(), 5)
def test_closed_feeds_are_never_good(self):
""" This test addresses Github #10.
It is very simple, but the `.good_feeds` query is quite complex.
"""
self.assertTrue(len(Feed.good_feeds) == 1)
closed_reason = u'closed for tests'
self.feed.close(closed_reason)
self.assertTrue(len(Feed.good_feeds) == 0)
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class ArticleDuplicateTest(TestCase):
def setUp(self):
# NOTE: we need real web pages, else the absolutization won't work or
# will find duplicates and tests will fail for a real-life reason.
self.article1 = Article(title='test1',
url='http://blog.1flow.io/post/'
'59410536612/1flow-blog-has-moved').save()
self.article2 = Article(title='test2',
url='http://obi.1flow.io/fr/').save()
self.article3 = Article(title='test3',
url='http://obi.1flow.io/en/').save()
# User & Reads creation
for index in xrange(1, 6):
username = 'test_user_%s' % index
du = DjangoUser.objects.create(username=username,
email='%s@test.1flow.io' % username)
# NOTE: the mongoDB user is created automatically. If you
# try to create one it will fail with duplicate index error.
u = du.mongo
Read(user=u, article=self.article1).save()
for index in xrange(6, 11):
username = 'test_user_%s' % index
du = DjangoUser.objects.create(username=username,
email='%s@test.1flow.io' % username)
u = du.mongo
Read(user=u, article=self.article2).save()
# Feeds creation
for index in xrange(1, 6):
f = Feed(name='test feed #%s' % index,
url='http://test-feed%s.com' % index).save()
self.article1.update(add_to_set__feeds=f)
self.article1.reload()
for index in xrange(6, 11):
f = Feed(name='test feed #%s' % index,
url='http://test-feed%s.com' % index).save()
self.article2.update(add_to_set__feeds=f)
self.article2.reload()
def tearDown(self):
Article.drop_collection()
User.drop_collection()
Read.drop_collection()
Feed.drop_collection()
def test_register_duplicate_bare(self):
self.assertEquals(Article.objects(
duplicate_of__exists=False).count(), 3)
self.article1.register_duplicate(self.article2)
# needed because feeds are modified in another instance of the
        # same database record, via the celery task.
self.article1.safe_reload()
self.assertEquals(self.article1.reads.count(), 10)
self.assertEquals(self.article2.reads.count(), 0)
self.assertEquals(len(self.article1.feeds), 10)
self.assertEquals(len(self.article2.feeds), 5)
self.assertEquals(self.article2.duplicate_of, self.article1)
self.assertEquals(Article.objects(
duplicate_of__exists=True).count(), 1)
self.assertEquals(Article.objects(
duplicate_of__exists=False).count(), 2)
def test_register_duplicate_not_again(self):
self.article1.register_duplicate(self.article2)
self.article1.safe_reload()
self.assertEquals(self.article2.duplicate_of, self.article1)
#
# TODO: finish this test case.
#
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class AbsolutizeTest(TestCase):
def setUp(self):
#Article.drop_collection()
#Feed.drop_collection()
self.article1 = Article(title=u'test1',
url=u'http://rss.feedsportal.com/c/707/f/9951/s/2b27496a/l/0L0Sreseaux0Etelecoms0Bnet0Cactualites0Clire0Elancement0Emondial0Edu0Esamsung0Egalaxy0Es40E25980A0Bhtml/story01.htm').save() # NOQA
self.article2 = Article(title=u'test2',
url=u'http://feedproxy.google.com/~r/francaistechcrunch/~3/hEIhLwVyEEI/').save() # NOQA
self.article3 = Article(title=u'test3',
url=u'http://obi.1flow.io/absolutize_test_401').save() # NOQA
self.article4 = Article(title=u'test4',
url=u'http://host.non.exixstentz.com/absolutize_test').save() # NOQA
self.article5 = Article(title=u'test5',
url=u'http://1flow.io/absolutize_test_404').save() # NOQA
def tearDown(self):
Article.drop_collection()
Feed.drop_collection()
def test_absolutize(self):
self.article1.absolutize_url()
self.assertEquals(self.article1.url, u'http://www.reseaux-telecoms.net/actualites/lire-lancement-mondial-du-samsung-galaxy-s4-25980.html') # NOQA
self.assertEquals(self.article1.url_absolute, True)
self.assertEquals(self.article1.url_error, '')
self.article2.absolutize_url()
self.assertEquals(self.article2.url, u'http://techcrunch.com/2013/05/18/hell-no-tumblr-users-wont-go-to-yahoo/') # NOQA
self.assertEquals(self.article2.url_absolute, True)
self.assertEquals(self.article2.url_error, '')
def test_absolutize_errors(self):
#
# NOTE: if a PROXY is set, the reasons word cases can vary.
# eg. 'Not Found' (via Squid) instead of 'NOT FOUND' (direct answer).
#
self.article3.absolutize_url()
self.assertEquals(self.article3.url, u'http://obi.1flow.io/absolutize_test_401') # NOQA
self.assertEquals(self.article3.url_absolute, False)
self.assertEquals(self.article3.url_error, u'HTTP Error 401 (Unauthorized) while resolving http://obi.1flow.io/absolutize_test_401.') # NOQA
self.article5.absolutize_url()
self.assertEquals(self.article5.url, u'http://1flow.io/absolutize_test_404') # NOQA
self.assertEquals(self.article5.url_absolute, False)
self.assertEquals(self.article5.url_error, u'HTTP Error 404 (NOT FOUND) while resolving http://1flow.io/absolutize_test_404.') # NOQA
self.article4.absolutize_url()
self.assertEquals(self.article4.url, u'http://host.non.exixstentz.com/absolutize_test') # NOQA
self.assertEquals(self.article4.url_absolute, False)
self.assertEquals(self.article4.url_error[:108], u"HTTPConnectionPool(host='host.non.exixstentz.com', port=80): Max retries exceeded with url: /absolutize_test") # NOQA
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class TagsTest(TestCase):
def setUp(self):
self.t1 = Tag(name='test1').save()
self.t2 = Tag(name='test2').save()
self.t3 = Tag(name='test3').save()
def tearDown(self):
Tag.drop_collection()
def test_add_parent(self):
self.t2.add_parent(self.t1)
self.t3.add_parent(self.t1)
self.assertEquals(self.t1 in self.t2.parents, True)
self.assertEquals(self.t1 in self.t3.parents, True)
self.assertEquals(self.t2 in self.t1.children, True)
self.assertEquals(self.t3 in self.t1.children, True)
def test_add_child(self):
self.t1.add_child(self.t2)
self.t1.add_child(self.t3)
self.assertEquals(self.t1 in self.t2.parents, True)
self.assertEquals(self.t1 in self.t3.parents, True)
self.assertEquals(self.t2 in self.t1.children, True)
self.assertEquals(self.t3 in self.t1.children, True)
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class WebSitesTest(TestCase):
def setUp(self):
WebSite.drop_collection()
Article.drop_collection()
self.ws1 = WebSite(url='http://test1.com').save()
self.ws2 = WebSite(url='http://test2.com').save()
def tearDown(self):
WebSite.drop_collection()
Article.drop_collection()
def test_get_or_create_website(self):
wt1, created = WebSite.get_or_create_website('http://test1.com')
self.assertFalse(created)
self.assertEquals(wt1, self.ws1)
wt3, created = WebSite.get_or_create_website('http://test3.com')
self.assertTrue(created)
self.assertNotEquals(wt3, self.ws1)
self.assertNotEquals(wt3, self.ws2)
wt4, created = WebSite.get_or_create_website('http://test3.com')
self.assertFalse(created)
self.assertEquals(wt3, wt4)
wt5, created = WebSite.get_or_create_website('http://test3.com/')
self.assertTrue(created)
self.assertNotEquals(wt5, wt4)
def test_get_from_url(self):
wt1 = WebSite.get_from_url('http://test1.com/example-article')
wt2 = WebSite.get_from_url('http://test1.com/example-article2')
self.assertEquals(wt1, self.ws1)
self.assertEquals(wt2, self.ws1)
self.assertEquals(wt1, wt2)
def test_register_duplicate_not_again(self):
wt1, created = WebSite.get_or_create_website('http://other.test1.com')
self.ws1.register_duplicate(wt1)
self.assertTrue(created)
self.assertEquals(wt1.duplicate_of, self.ws1)
wt2, created = WebSite.get_or_create_website('http://other.test1.com')
self.assertFalse(created)
self.assertNotEquals(wt2, wt1)
self.assertEquals(wt2, self.ws1)
# should fail.
#self.ws2.register_duplicate(wt1)
#
# TODO: finish this test case.
#
def test_websites_duplicates(self):
pass
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class AuthorsTest(TestCase):
pass
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class UsersTest(TestCase):
def setUp(self):
self.django_user = DjangoUser.objects.create_user(
username='testuser', password='testpass',
email='test-ocE3f6VQqFaaAZ@1flow.io')
# Auto-created on PG's post_save().
self.mongodb_user = self.django_user.mongo
def tearDown(self):
User.drop_collection()
def test_user_property(self):
self.assertEquals(self.django_user.mongo, self.mongodb_user)
def test_user_preferences(self):
# We just want to be sure preferences are created when a new
# user is, and all the embedded documents are created too.
self.assertEquals(self.django_user.mongo.preferences.home.style, u'RL')
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class FoldersTest(TestCase):
def setUp(self):
self.django_user = DjangoUser.objects.create_user(
username='testuser', password='testpass',
email='test-ocE3f6VQqFaaAZ@1flow.io')
# Auto-created on PG's post_save().
self.mongodb_user = self.django_user.mongo
def tearDown(self):
User.drop_collection()
Folder.drop_collection()
def test_properties(self):
user = self.mongodb_user
self.assertEquals(len(user.folders), 0)
# Be sure 2 calls to `Folder.get_root_for` don't create 2 folders.
# The second call must return the same root folder, not create a new one.
root = Folder.get_root_for(user)
# The root folder is kind of hidden and is not counted in folders.
self.assertEquals(len(user.folders), 0)
self.assertEquals(len(user.folders_tree), 0)
self.assertEquals(root, user.root_folder)
def test_parent_children_root(self):
user = self.mongodb_user
root = user.root_folder
self.assertEquals(len(user.folders), 0)
ftest1, created = Folder.add_folder('test1', user)
self.assertEquals(len(user.folders), 1)
ftest2, created = Folder.add_folder('test2', user)
ftest3, created = Folder.add_folder('test3', user)
# We didn't pass "root" as argument. `Folder` class
# updated the DB, but not our local instance.
root.reload()
self.assertEquals(len(root.children), 3)
for folder in (ftest1, ftest2, ftest3):
self.assertTrue(folder in root.children)
self.assertTrue(folder.parent == root)
self.assertEquals(len(user.folders), 3)
def test_parent_children_multiple(self):
user = self.mongodb_user
root = user.root_folder
ftest1, created = Folder.add_folder('test1', user)
self.assertEquals(len(user.folders), 1)
ftest2, created = Folder.add_folder('test2', user, ftest1)
ftest3, created = Folder.add_folder('test3', user, ftest1)
# We didn't pass "root" as argument. `Folder` class
# updated the DB, but not our local instance. This
# will implicitly reload a full folder hierarchy
# from the database.
root.reload()
self.assertEquals(len(root.children), 1)
self.assertEquals(len(ftest1.children), 2)
self.assertEquals(len(ftest2.children), 0)
self.assertEquals(len(ftest3.children), 0)
for folder in (ftest2, ftest3):
self.assertFalse(folder in root.children)
self.assertTrue(folder in root.children_tree)
self.assertTrue(folder in ftest1.children)
self.assertTrue(folder in ftest1.children_tree)
self.assertTrue(folder.parent == ftest1)
self.assertFalse(ftest1 in folder.children)
self.assertFalse(root in folder.children)
self.assertFalse(ftest1 in folder.children_tree)
self.assertFalse(root in folder.children_tree)
self.assertEquals(len(user.folders), 3)
for folder in (ftest1, ftest2, ftest3):
self.assertTrue(folder in user.folders)
self.assertTrue(folder in user.folders_tree)
self.assertTrue(folder in root.children_tree)
# Move the folder in the hierarchy.
ftest3.set_parent(ftest2)
# HEADS UP: we need to reload ftest1 because
# id(ftest2) != id(ftest1.children[0]) for some
# obscure reason. This will implicitly reload
# a full folder hierarchy from the database.
ftest1.reload()
# These are not necessary.
#ftest2.reload()
#ftest3.reload()
self.assertEquals(len(root.children), 1)
self.assertEquals(len(ftest1.children), 1)
self.assertEquals(len(ftest2.children), 1)
self.assertEquals(len(ftest3.children), 0)
self.assertTrue(ftest3 in ftest2.children)
self.assertFalse(ftest3 in ftest1.children)
# HEADS UP: the result of this test depends on a user preference.
self.assertFalse(ftest3 in user.folders_tree)
user.preferences.selector.extended_folders_depth = True
user.preferences.save()
self.assertTrue(ftest3 in user.folders_tree)
self.assertTrue(ftest3 in ftest2.children_tree)
self.assertTrue(ftest3 in ftest1.children_tree)
self.assertTrue(ftest3 in root.children_tree)
def test_parent_chain_checking(self):
user = self.mongodb_user
root = user.root_folder
ftest1, created = Folder.add_folder('test1', user)
ftest2, created = Folder.add_folder('test2', user, ftest1)
ftest3, created = Folder.add_folder('test3', user, ftest2)
# root
self.assertTrue(root.is_parent_of(ftest1))
self.assertTrue(root.is_parent_of(ftest2))
self.assertTrue(root.is_parent_of(ftest3))
self.assertFalse(root.is_parent_of(root))
# ftest1
self.assertTrue(ftest1.is_parent_of(ftest2))
self.assertTrue(ftest1.is_parent_of(ftest3))
self.assertFalse(ftest1.is_parent_of(ftest1))
self.assertFalse(ftest1.is_parent_of(root))
# ftest2
self.assertTrue(ftest2.is_parent_of(ftest3))
self.assertFalse(ftest2.is_parent_of(ftest2))
self.assertFalse(ftest2.is_parent_of(ftest1))
self.assertFalse(ftest2.is_parent_of(root))
# ftest3
self.assertFalse(ftest3.is_parent_of(ftest3))
self.assertFalse(ftest3.is_parent_of(ftest2))
self.assertFalse(ftest3.is_parent_of(ftest1))
self.assertFalse(ftest3.is_parent_of(root))
def test_avoid_cycles(self):
user = self.mongodb_user
root = user.root_folder
ftest1, created = Folder.add_folder('test1', user)
ftest2, created = Folder.add_folder('test2', user, ftest1)
ftest3, created = Folder.add_folder('test3', user, ftest2)
self.assertTrue(ftest1.is_parent_of(ftest2))
self.assertTrue(ftest1.is_parent_of(ftest3))
self.assertFalse(ftest1.is_parent_of(ftest1))
self.assertFalse(ftest1.is_parent_of(root))
self.assertRaises(RuntimeError, ftest1.set_parent, ftest1)
self.assertFalse(ftest1 in ftest1.children_tree)
self.assertFalse(ftest1 in ftest2.children_tree)
self.assertFalse(ftest1 in ftest3.children_tree)
self.assertFalse(ftest2 in ftest2.children_tree)
self.assertFalse(ftest2 in ftest3.children_tree)
self.assertFalse(ftest3 in ftest3.children_tree)
self.assertRaises(RuntimeError, ftest2.add_child, ftest2)
self.assertRaises(TreeCycleException, ftest1.set_parent, ftest2)
self.assertRaises(TreeCycleException, ftest1.set_parent, ftest3)
self.assertRaises(TreeCycleException, ftest2.add_child, ftest1)
self.assertRaises(TreeCycleException, ftest3.add_child, ftest1)
self.assertRaises(TreeCycleException, ftest3.add_child, ftest2)
@override_settings(STATICFILES_STORAGE=
'pipeline.storage.NonPackagingPipelineStorage',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory',)
class GroupsTest(TestCase):
def setUp(self):
self.django_user1 = DjangoUser.objects.create_user(
username='testuser1', password='testpass',
email='test-ocE3f6VQqFaaAZ@1flow.io')
self.django_user2 = DjangoUser.objects.create_user(
username='testuser2', password='testpass',
email='test-ocE3f6VQqFaaBZ@1flow.io')
self.django_user3 = DjangoUser.objects.create_user(
username='testuser3', password='testpass',
email='test-ocE3f6VQqFaaCZ@1flow.io')
# Auto-created on PG's post_save().
self.alice = self.django_user1.mongo
self.bob = self.django_user2.mongo
self.john = self.django_user3.mongo
self.alice_friends = Group(name="Alice's friends",
creator=self.alice).save()
self.alice_work = Group(name="Alice's co-workers",
creator=self.alice).save()
self.bob_friends = Group(name="Bob's friends",
creator=self.bob).save()
self.john_friends = Group(name="John's friends",
creator=self.john).save()
def tearDown(self):
User.drop_collection()
Group.drop_collection()
def system_groups_are_always_here(self):
self.assertEquals(self.alice.all_relations_group.__class__, Group)
self.assertEquals(self.bob.all_relations_group.__class__, Group)
self.assertEquals(self.john.all_relations_group.__class__, Group)
self.assertEquals(self.alice.in_relations_of_group.__class__, Group)
self.assertEquals(self.bob.in_relations_of_group.__class__, Group)
self.assertEquals(self.john.in_relations_of_group.__class__, Group)
self.assertEquals(self.alice.blocked_group.__class__, Group)
self.assertEquals(self.bob.blocked_group.__class__, Group)
self.assertEquals(self.john.blocked_group.__class__, Group)
def basic_inter_relationships(self):
alice = self.alice
bob = self.bob
alice_friends = self.alice_friends
bob_friends = self.bob_friends
assertTrue = self.assertTrue
assertFalse = self.assertFalse
alice_friends.add_member(bob)
assertTrue(bob in alice_friends)
assertTrue(bob in alice.all_relations_group)
assertFalse(bob in alice.in_relations_of_group)
assertTrue(alice in bob.in_relations_of_group)
assertFalse(alice in bob.all_relations_group)
bob_friends.add_member(alice)
assertTrue(alice in bob_friends)
assertTrue(alice in bob.all_relations_group) # Alice is "promoted"
assertFalse(alice in bob.in_relations_of_group) # (idem)
assertTrue(bob in alice.all_relations_group) # Bob doesn't move.
assertFalse(bob in alice.in_relations_of_group) # (idem)
def unidirectional_relation_deletion(self):
bob = self.bob
john = self.john
bob_friends = self.bob_friends
assertTrue = self.assertTrue
assertFalse = self.assertFalse
bob_friends.add_member(john)
assertTrue(john in bob_friends)
assertTrue(john in bob.all_relations_group)
assertFalse(john in bob.in_relations_of_group)
assertFalse(bob in john.all_relations_group)
assertTrue(bob in john.in_relations_of_group)
bob_friends.delete_member(john)
assertFalse(john in bob_friends)
assertFalse(john in bob.all_relations_group)
assertFalse(john in bob.in_relations_of_group)
assertFalse(bob in john.all_relations_group)
assertFalse(bob in john.in_relations_of_group)
def bidirectional_relation_deletion(self):
bob = self.bob
john = self.john
bob_friends = self.bob_friends
john_friends = self.john_friends
assertTrue = self.assertTrue
assertFalse = self.assertFalse
bob_friends.add_member(john)
assertTrue(john in bob_friends)
assertTrue(john in bob.all_relations_group)
assertFalse(john in bob.in_relations_of_group)
assertFalse(bob in john.all_relations_group)
assertTrue(bob in john.in_relations_of_group)
john_friends.add_member(bob)
assertTrue(bob in john_friends)
assertTrue(bob in john.all_relations_group) # Bob is "promoted"
assertFalse(bob in john.in_relations_of_group) # (idem)
assertTrue(john in bob.all_relations_group) # John doesn't move.
assertFalse(john in bob.in_relations_of_group) # (idem)
bob_friends.delete_member(john)
assertFalse(john in bob_friends)
assertFalse(john in bob.all_relations_group)
assertTrue(john in bob.in_relations_of_group) # The unidir-rel remains.
assertTrue(bob in john.all_relations_group) # (idem)
assertFalse(bob in john.in_relations_of_group)
john_friends.delete_member(bob)
assertFalse(bob in john_friends)
assertFalse(bob in john.all_relations_group) # No trace left
assertFalse(bob in john.in_relations_of_group)
assertFalse(john in bob.all_relations_group) # (idem)
assertFalse(john in bob.in_relations_of_group)
def multi_groups_relationships(self):
alice = self.alice
bob = self.bob
john = self.john
alice_friends = self.alice_friends
alice_work = self.alice_work
bob_friends = self.bob_friends
john_friends = self.john_friends
assertTrue = self.assertTrue
assertFalse = self.assertFalse
def blocking_consequences_in_groups(self):
pass
|
1flow/1flow
|
oneflow/core/tests/test_models.py
|
Python
|
agpl-3.0
| 42,072
|
[
"Galaxy"
] |
8122f5256e763709a0a12bcfe3cfea86b6ff2effeca3bfb2630d5362ef0b9263
|
# Copyright (C) 2016
# Max Planck Institute for Polymer Research & JGU Mainz
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import filecmp
import glob
import espressopp
import unittest
import mpi4py.MPI as MPI
expected_files = [
'expected_gromacs_adress.gro'
]
def prewrite_expected_files(file_list_expected):
length_unit_test = "LJ"
header_lines = [
'system description, current step=0, length unit=%s\n' % (length_unit_test),
' 5\n'
]
lines_to_be_written_standard = [
'10000TTT TTT 1 5.000 5.500 5.000 0.0000 0.0000 0.0000\n',
'10000TTT TTT 2 5.000 6.500 5.000 0.0000 0.0000 0.0000\n',
'10000TTT TTT 3 5.000 7.500 5.000 0.0000 0.0000 0.0000\n',
'10000TTT TTT 4 5.000 8.500 5.000 0.0000 0.0000 0.0000\n',
'10000TTT TTT 5 5.000 9.500 5.000 0.0000 0.0000 0.0000\n'
]
lines_list = [lines_to_be_written_standard]
zipped_lists = zip(expected_files, lines_list)
for filename,lines in zipped_lists:
with open(filename, "w") as f:
for header_line in header_lines:
f.write(header_line)
for line in lines:
f.write(line)
box_line = " 10.00000 10.00000 10.00000\n"
f.write(box_line)
def remove_all_gro_files():
pattern = os.getcwd() + '/*.gro'
files_to_remove = glob.glob(pattern)
for file in files_to_remove:
os.remove(file)
class TestDumpGROAdress(unittest.TestCase):
def setUp(self):
prewrite_expected_files(expected_files)
# set up system
system = espressopp.System()
box = (10, 10, 10)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, 1.5, 0.3)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
self.system = system
# def test_simple_gromacs_adress(self):
# particle_list = [
# (1, espressopp.Real3D(2.2319834598, 3.5858734534, 4.7485623451), espressopp.Real3D(2.2319834598, 1.5556734534, 4.7485623451), 0),
# (2, espressopp.Real3D(6.3459834598, 9.5858734534, 16.7485623451), espressopp.Real3D(3.2319834598, 1.5858734534, 1.7485623451), 0),
# (3, espressopp.Real3D(2.2319834598, 15.5858734534, 5.7485623451), espressopp.Real3D(4.2319834598, 2.5858734534, 2.7485623451), 2),
# (4, espressopp.Real3D(8.2319834598, 7.9958734534, 14.5325623451), espressopp.Real3D(5.2319834598, 6.5858734534, 18.7485623451), 3),
# (5, espressopp.Real3D(3.2319834598, 19.5858734534, 4.7485623451), espressopp.Real3D(6.2319834598, 8.5858734534, 7.7485623451), 1),
# ]
# self.system.storage.addParticles(particle_list, 'id', 'pos', 'v', 'type')
# file_gro_adress = "test_standard_dumpGROAdress_type_not_hardcoded.gro"
# dump_gro_adress = espressopp.io.DumpGROAdress(self.system, self.integrator, filename=file_gro, unfolded = False, length_factor = 1.0, length_unit = 'LJ', append = False)
# dump_gro_adress.dump()
# self.assertTrue(filecmp.cmp(file_gro_adress, expected_files[0], shallow = False), "!!! Error! Files are not equal!! They should be equal!")
def test_gromacs_adress(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
#tuples = [(1,2),(3,4),(5,6),(7,8),(9,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)
# add interaction
interNB = espressopp.interaction.VerletListHadressLennardJones2(vl, ftpl)
potWCA1 = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=1.4)
potWCA2 = espressopp.interaction.LennardJones(epsilon=0.0, sigma=1.0, shift='auto', cutoff=1.4)
interNB.setPotentialAT(type1=0, type2=0, potential=potWCA1) # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potWCA2) # CG
self.system.addInteraction(interNB)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
file_gro_adress = "test_standard_dumpGROAdress_type_not_hardcoded.gro"
dump_gro_adress = espressopp.io.DumpGROAdress(self.system, ftpl, integrator, filename=file_gro_adress)
dump_gro_adress.dump()
self.assertTrue(filecmp.cmp(file_gro_adress, expected_files[0], shallow = False), "!!! Error! Files are not equal!! They should be equal!")
def tearDown(self):
remove_all_gro_files()
if __name__ == '__main__':
unittest.main()
|
acfogarty/espressopp
|
testsuite/FileIOTests/dump_gromacs_adress/test_dump_gromacs_adress.py
|
Python
|
gpl-3.0
| 6,768
|
[
"ESPResSo"
] |
b802a7e8c2932d8bbf9690133f697ced8d01075967411b6e41ee447ef6122167
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404,HttpResponseServerError
from django.shortcuts import render_to_response
from django import template
from django.template import Context, RequestContext
from django.template.defaultfilters import truncatewords
from django.utils import simplejson
from django.core import serializers
from bartender.models import Article
from quips.models import Quip, QuipForm
from facebook import Facebook
from facebookconnect.models import FacebookTemplate
import logging
def get_quips(request):
"""ajax request for more quips"""
if request.method == "POST":
i = None
if request.POST.get('article_url',False):
recent_quips = Quip.objects.filter(created__gt=request.POST['since'],article=Article.objects.get(url=request.POST['article_url'])).order_by('-created')
else:
recent_quips = Quip.objects.filter(created__gt=request.POST['since']).order_by('-created')
if recent_quips:
context = RequestContext(request)
if request.POST['show_headline'] == 'true':
hedline = True
else:
hedline = False
context.update({'quips':recent_quips,'show_headline':hedline})
t = template.loader.get_template('quips/quip_list.html')
i = t.render(context)
json = simplejson.dumps({
'date':datetime.now().isoformat(' '),
'insert':i,
'quips':recent_quips.count(),
})
return HttpResponse(json, mimetype='application/json')
VERB_COLORS = {
'thinks': '#079107',
'loves': '#611739',
'feels': '#9d6884',
'agrees': '#cb8337',
'disagrees': '#6b6b6d',
'wonders': '#2a436a',
'hates': '#2e2a2b',
}
#@login_required
def create(request,option=None):
if request.method == "POST":
f = QuipForm(request.POST, instance=Quip(user=request.user))
if f.is_valid():
new_quip = f.save()
verb = f.instance.verb
template_data = {
"verb": verb,
"verb_color": VERB_COLORS[verb],
"quip": f.instance.message,
"url": settings.ROOT_URL + f.instance.get_absolute_url(),
"headline": truncatewords(f.instance.article.headline,20),
"article": truncatewords(f.instance.article.body,50),
}
template_bundle_id = FacebookTemplate.objects.get(name='quip').template_bundle_id
results = {
'success':True,
'date':datetime.now().isoformat(' '),
'template_bundle_id':template_bundle_id,
'template_data':template_data
}
else:
if 'message' in f.errors.keys():
errors = 'Please type a message.'
else:
errors = 'There was a problem. Sorry.'
results = {'success':False,'errors':errors}
json = simplejson.dumps(results)
if option:
return HttpResponse(json, mimetype='application/json')
else:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
raise Http404
@login_required
def flag_as_offensive(request,quip_id):
if request.method == "POST":
b = Quip.objects.get(pk=quip_id)
b.offensive = True
b.save()
return HttpResponseRedirect(b.get_absolute_url())
else:
raise Http404
def api_get(request):
form_url = request.GET.get('form_url', '')
try:
article = Article.objects.get(slug=request.GET['url'])
except Article.DoesNotExist:
article = Article(slug=request.GET['url'], headline=request.GET['headline'])
article.save()
context = RequestContext(request)
context.update({
'quips': Quip.objects.filter(article=article).order_by('-created'),
'quip_form': QuipForm(instance=Quip(user=context['user'],article=article)),
'style':'',
'url':form_url,
})
t = template.loader.get_template('quips/quips.html')
i = t.render(context)
t = template.loader.get_template('quips/quip_form.html')
f = t.render(context)
json = simplejson.dumps({
'insert':i,
'form':f
})
return HttpResponse(json, mimetype='application/json')
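# --- Illustrative note (not part of the original module) ---
# A hedged sketch of the JSON payload api_get() hands back to the embedding
# page, assuming the two templates render to plain HTML fragments:
#
#   {
#       "insert": "<ol class='quips'>...rendered quips/quips.html...</ol>",
#       "form": "<form>...rendered quips/quip_form.html...</form>"
#   }
#
# get_quips() follows the same pattern with the keys 'date', 'insert' and 'quips'.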
|
brianboyer/newsmixer
|
social/quips/views.py
|
Python
|
gpl-3.0
| 5,451
|
[
"Brian"
] |
1ed41ce136afab82671ffc12d26fe0830ac208c7c0337b58662e9608d3db3597
|
from __future__ import annotations
from xml.etree import ElementTree as ET
import numpy as np
from .._exceptions import WriteError
from .._helpers import register_format
def write(
filename,
mesh,
float_fmt: str = ".3f",
stroke_width: str | None = None,
# Use a default image_width (not None). If set to None, images will come out at the
# width of the mesh (which is okay). Some viewers (e.g., eog) have problems
# displaying SVGs of width around 1 since they interpret it as the width in pixels.
image_width: int | float | None = 100,
# ParaView's default colors
fill: str = "#c8c5bd",
stroke: str = "#000080",
):
if mesh.points.shape[1] == 3 and not np.allclose(
mesh.points[:, 2], 0.0, rtol=0.0, atol=1.0e-14
):
raise WriteError(
f"SVG can only handle flat 2D meshes (shape: {mesh.points.shape})"
)
pts = mesh.points[:, :2].copy()
min_x = np.min(pts[:, 0]) if len(pts) > 0 else 0.0
max_x = np.max(pts[:, 0]) if len(pts) > 0 else 0.0
min_y = np.min(pts[:, 1]) if len(pts) > 0 else 0.0
max_y = np.max(pts[:, 1]) if len(pts) > 0 else 0.0
pts[:, 1] = max_y + min_y - pts[:, 1]
width = max_x - min_x
height = max_y - min_y
if image_width is not None and width != 0:
scaling_factor = image_width / width
min_x *= scaling_factor
min_y *= scaling_factor
width *= scaling_factor
height *= scaling_factor
pts *= scaling_factor
if stroke_width is None:
stroke_width = str(width / 100)
fmt = " ".join(4 * [f"{{:{float_fmt}}}"])
svg = ET.Element(
"svg",
xmlns="http://www.w3.org/2000/svg",
version="1.1",
viewBox=fmt.format(min_x, min_y, width, height),
)
style = ET.SubElement(svg, "style")
opts = [
f"fill: {fill}",
f"stroke: {stroke}",
f"stroke-width: {stroke_width}",
"stroke-linejoin:bevel",
]
# Use path, not polygon, because svgo converts polygons to paths and doesn't convert
# the style alongside. No problem if it's paths all the way.
style.text = "path {" + "; ".join(opts) + "}"
for cell_block in mesh.cells:
if cell_block.type not in ["line", "triangle", "quad"]:
continue
if cell_block.type == "line":
fmt = (
f"M {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
)
elif cell_block.type == "triangle":
fmt = (
f"M {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
+ "Z"
)
elif cell_block.type == "quad":
fmt = (
f"M {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
+ f"L {{:{float_fmt}}} {{:{float_fmt}}}"
+ "Z"
)
for cell in cell_block.data:
ET.SubElement(
svg,
"path",
d=fmt.format(*pts[cell].flatten()),
)
tree = ET.ElementTree(svg)
tree.write(filename)
register_format("svg", [".svg"], None, {"svg": write})
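# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving this writer through meshio's public
# API; meshio.Mesh and meshio.write are assumed to be the usual entry points,
# and the format is picked up from the ".svg" extension registered above.
if __name__ == "__main__":
    import meshio

    # One flat triangle; 2D points are fine, the writer only rejects meshes
    # whose third coordinate is not (numerically) zero.
    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
    cells = [("triangle", [[0, 1, 2]])]
    meshio.write("single_triangle.svg", meshio.Mesh(points, cells))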
|
nschloe/meshio
|
src/meshio/svg/_svg.py
|
Python
|
mit
| 3,363
|
[
"ParaView"
] |
54ee540090d2fcc88eaf4b8d40127eb6aaee9a642e1f32439d61978f8eeaa982
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of Policy Engine For Neutron"""
import contextlib
import StringIO
import urllib2
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import six.moves.urllib.request as urlrequest
import neutron
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron import context
from neutron import manager
from neutron.openstack.common import policy as common_policy
from neutron import policy
from neutron.tests import base
class PolicyFileTestCase(base.BaseTestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
self.context = context.Context('fake', 'fake', is_admin=False)
self.target = {'tenant_id': 'fake'}
def test_modified_policy_reloads(self):
tmpfilename = self.get_temp_file_path('policy')
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": ""}""")
cfg.CONF.set_override('policy_file', tmpfilename)
policy.refresh()
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": "!"}""")
policy.refresh()
self.target = {'tenant_id': 'fake_tenant'}
self.assertRaises(common_policy.PolicyNotAuthorized,
policy.enforce,
self.context,
action,
self.target)
class PolicyTestCase(base.BaseTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
# NOTE(vish): preload rules to circumvent reloading from file
rules = {
"true": '@',
"example:allowed": '@',
"example:denied": '!',
"example:get_http": "http:http://www.example.com",
"example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s",
"example:early_and_fail": "! and @",
"example:early_or_success": "@ or !",
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
policy.refresh()
# NOTE(vish): then overload underlying rules
policy.set_rules(dict((k, common_policy.parse_rule(v))
for k, v in rules.items()))
self.context = context.Context('fake', 'fake', roles=['member'])
self.target = {}
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_check_bad_action_noraise(self):
action = "example:denied"
result = policy.check(self.context, action, self.target)
self.assertEqual(result, False)
def test_check_non_existent_action(self):
action = "example:idonotexist"
result_1 = policy.check(self.context, action, self.target)
self.assertFalse(result_1)
result_2 = policy.check(self.context, action, self.target,
might_not_exist=True)
self.assertTrue(result_2)
def test_enforce_good_action(self):
action = "example:allowed"
result = policy.enforce(self.context, action, self.target)
self.assertEqual(result, True)
@mock.patch.object(urlrequest, 'urlopen',
return_value=StringIO.StringIO("True"))
def test_enforce_http_true(self, mock_urlrequest):
action = "example:get_http"
target = {}
result = policy.enforce(self.context, action, target)
self.assertEqual(result, True)
def test_enforce_http_false(self):
def fakeurlopen(url, post_data):
return six.StringIO("False")
with mock.patch.object(urllib2, 'urlopen', new=fakeurlopen):
action = "example:get_http"
target = {}
self.assertRaises(common_policy.PolicyNotAuthorized,
policy.enforce, self.context,
action, target)
def test_templatized_enforcement(self):
target_mine = {'tenant_id': 'fake'}
target_not_mine = {'tenant_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.Context('admin', 'fake', roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(base.BaseTestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
tmpfilename = self.get_temp_file_path('policy.json')
self.rules = {
"default": '',
"example:exist": '!',
}
with open(tmpfilename, "w") as policyfile:
jsonutils.dump(self.rules, policyfile)
cfg.CONF.set_override('policy_file', tmpfilename)
policy.refresh()
self.context = context.Context('fake', 'fake')
def test_policy_called(self):
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
FAKE_RESOURCE_NAME = 'fake_resource'
FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy'
FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME:
{'attr': {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate': {'type:dict':
{'sub_attr_1': {'type:string': None},
'sub_attr_2': {'type:string': None}}}
}},
# special plural name
"%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'):
{'attr': {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate': {'type:dict':
{'sub_attr_1': {'type:string': None},
'sub_attr_2': {'type:string': None}}}
}}}
class NeutronPolicyTestCase(base.BaseTestCase):
def fakepolicyinit(self, **kwargs):
enf = policy._ENFORCER
enf.set_rules(common_policy.Rules(self.rules))
def setUp(self):
super(NeutronPolicyTestCase, self).setUp()
policy.refresh()
self.admin_only_legacy = "role:admin"
self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s"
# Add Fake resources to RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES)
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
"context_is_admin": "role:admin",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or "
"tenant_id:%(network:tenant_id)s",
"admin_or_owner": ("rule:context_is_admin or "
"tenant_id:%(tenant_id)s"),
"admin_only": "rule:context_is_admin",
"regular_user": "role:user",
"shared": "field:networks:shared=True",
"external": "field:networks:router:external=True",
"network_device": "field:port:device_owner=~^network:",
"default": '@',
"create_network": "rule:admin_or_owner",
"create_network:shared": "rule:admin_only",
"update_network": '@',
"update_network:shared": "rule:admin_only",
"get_network": "rule:admin_or_owner or rule:shared or "
"rule:external or rule:context_is_advsvc",
"create_subnet": "rule:admin_or_network_owner",
"create_port:mac": "rule:admin_or_network_owner or "
"rule:context_is_advsvc",
"create_port:device_owner": "not rule:network_device",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"create_fake_resource": "rule:admin_or_owner",
"create_fake_resource:attr": "rule:admin_or_owner",
"create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner",
"create_fake_resource:attr:sub_attr_2": "rule:admin_only",
"create_fake_policy:": "rule:admin_or_owner",
"get_firewall_policy": "rule:admin_or_owner or "
"rule:shared",
"get_firewall_rule": "rule:admin_or_owner or "
"rule:shared"
}.items())
def remove_fake_resource():
del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]
self.patcher = mock.patch.object(neutron.policy,
'init',
new=self.fakepolicyinit)
self.patcher.start()
self.addCleanup(remove_fake_resource)
self.context = context.Context('fake', 'fake', roles=['user'])
plugin_klass = importutils.import_class(
"neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
fake_manager = self.manager_patcher.start()
fake_manager_instance = fake_manager.return_value
fake_manager_instance.plugin = plugin_klass()
def _test_action_on_attr(self, context, action, obj, attr, value,
exception=None, **kwargs):
action = "%s_%s" % (action, obj)
target = {'tenant_id': 'the_owner', attr: value}
if kwargs:
target.update(kwargs)
if exception:
self.assertRaises(exception, policy.enforce,
context, action, target)
else:
result = policy.enforce(context, action, target)
self.assertEqual(result, True)
def _test_nonadmin_action_on_attr(self, action, attr, value,
exception=None, **kwargs):
user_context = context.Context('', "user", roles=['user'])
self._test_action_on_attr(user_context, action, "network", attr,
value, exception, **kwargs)
def _test_advsvc_action_on_attr(self, action, obj, attr, value,
exception=None, **kwargs):
user_context = context.Context('', "user",
roles=['user', 'advsvc'])
self._test_action_on_attr(user_context, action, obj, attr,
value, exception, **kwargs)
def test_nonadmin_write_on_private_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', False,
common_policy.PolicyNotAuthorized)
def test_nonadmin_read_on_private_fails(self):
self._test_nonadmin_action_on_attr('get', 'shared', False,
common_policy.PolicyNotAuthorized)
def test_nonadmin_write_on_shared_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', True,
common_policy.PolicyNotAuthorized)
def test_create_port_device_owner_regex(self):
blocked_values = ('network:', 'network:abdef', 'network:dhcp',
'network:router_interface')
for val in blocked_values:
self._test_advsvc_action_on_attr(
'create', 'port', 'device_owner', val,
common_policy.PolicyNotAuthorized
)
ok_values = ('network', 'networks', 'my_network:test', 'my_network:')
for val in ok_values:
self._test_advsvc_action_on_attr(
'create', 'port', 'device_owner', val
)
def test_advsvc_get_network_works(self):
self._test_advsvc_action_on_attr('get', 'network', 'shared', False)
def test_advsvc_create_network_fails(self):
self._test_advsvc_action_on_attr('create', 'network', 'shared', False,
common_policy.PolicyNotAuthorized)
def test_advsvc_create_port_works(self):
self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False)
def test_advsvc_get_port_works(self):
self._test_advsvc_action_on_attr('get', 'port', 'shared', False)
def test_advsvc_update_port_works(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_advsvc_action_on_attr('update', 'port', 'shared', True,
**kwargs)
def test_advsvc_delete_port_works(self):
self._test_advsvc_action_on_attr('delete', 'port', 'shared', False)
def test_advsvc_create_subnet_fails(self):
self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False,
common_policy.PolicyNotAuthorized)
def test_nonadmin_read_on_shared_succeeds(self):
self._test_nonadmin_action_on_attr('get', 'shared', True)
def _test_enforce_adminonly_attribute(self, action, **kwargs):
admin_context = context.get_admin_context()
target = {'shared': True}
if kwargs:
target.update(kwargs)
result = policy.enforce(admin_context, action, target)
self.assertEqual(result, True)
def test_enforce_adminonly_attribute_create(self):
self._test_enforce_adminonly_attribute('create_network')
def test_enforce_adminonly_attribute_update(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_enforce_adminonly_attribute('update_network', **kwargs)
def test_reset_adminonly_attr_to_default_fails(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_nonadmin_action_on_attr('update', 'shared', False,
common_policy.PolicyNotAuthorized,
**kwargs)
def test_enforce_adminonly_attribute_no_context_is_admin_policy(self):
del self.rules[policy.ADMIN_CTX_POLICY]
self.rules['admin_only'] = common_policy.parse_rule(
self.admin_only_legacy)
self.rules['admin_or_owner'] = common_policy.parse_rule(
self.admin_or_owner_legacy)
self._test_enforce_adminonly_attribute('create_network')
def test_enforce_adminonly_attribute_nonadminctx_returns_403(self):
action = "create_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self):
del self.rules[policy.ADMIN_CTX_POLICY]
self.rules['admin_only'] = common_policy.parse_rule(
self.admin_only_legacy)
self.rules['admin_or_owner'] = common_policy.parse_rule(
self.admin_or_owner_legacy)
action = "create_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def _test_build_subattribute_match_rule(self, validate_value):
bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate']
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = (
validate_value)
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
self.assertFalse(policy._build_subattr_match_rule(
'attr',
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'],
action,
target))
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk
def test_build_subattribute_match_rule_empty_dict_validator(self):
self._test_build_subattribute_match_rule({})
def test_build_subattribute_match_rule_wrong_validation_info(self):
self._test_build_subattribute_match_rule(
{'type:dict': 'wrong_stuff'})
def test_build_match_rule_special_pluralized(self):
action = "create_" + FAKE_SPECIAL_RESOURCE_NAME
pluralized = "create_fake_policies"
target = {}
result = policy._build_match_rule(action, target, pluralized)
self.assertEqual("rule:" + action, str(result))
def test_build_match_rule_normal_pluralized_when_create(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {}
result = policy._build_match_rule(action, target, None)
self.assertEqual("rule:" + action, str(result))
def test_enforce_subattribute(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
result = policy.enforce(self.context, action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
result = policy.enforce(context.get_admin_context(),
action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target, None)
def test_enforce_regularuser_on_read(self):
action = "get_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_policy_shared(self):
action = "get_firewall_policy"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_rule_shared(self):
action = "get_firewall_rule"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check(self):
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check_parent_resource(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_plugin_failure(self):
def fakegetnetwork(*args, **kwargs):
raise NotImplementedError('Blast!')
# The policy check and plugin method used in this test are irrelevant,
# so long as we verify that, if *f* blows up, the policy engine's
# behaviour of propagating the exception is preserved
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
self.assertRaises(NotImplementedError,
policy.enforce,
self.context,
action,
target)
def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
del self.rules['admin_or_network_owner']
self.rules['admin_or_network_owner'] = common_policy.parse_rule(
"role:admin or tenant_id:%(network_tenant_id)s")
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_tenant_id_check_no_target_field_raises(self):
# Try and add a bad rule
self.assertRaises(
exceptions.PolicyInitError,
common_policy.parse_rule,
'tenant_id:(wrong_stuff)')
def _test_enforce_tenant_id_raises(self, bad_rule):
self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule)
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
self.fakepolicyinit()
self.assertRaises(exceptions.PolicyCheckError,
policy.enforce,
self.context, action, target)
def test_enforce_tenant_id_check_malformed_target_field_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')
def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')
def test_get_roles_context_is_admin_rule_missing(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
"some_other_rule": "role:admin",
}.items())
policy.set_rules(common_policy.Rules(rules))
# 'admin' role is expected for bw compatibility
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_role_check(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "role:admin",
}.items())
policy.set_rules(common_policy.Rules(rules))
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_rule_check(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "rule:some_other_rule",
"some_other_rule": "role:admin",
}.items())
policy.set_rules(common_policy.Rules(rules))
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_or_check(self):
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2",
"rule1": "role:admin_1",
"rule2": "role:admin_2"
}.items())
self.assertEqual(['admin_1', 'admin_2'],
policy.get_admin_roles())
def test_get_roles_with_other_rules(self):
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "role:xxx or other:value",
}.items())
self.assertEqual(['xxx'], policy.get_admin_roles())
def _test_set_rules_with_deprecated_policy(self, input_rules,
expected_rules):
policy.set_rules(input_rules.copy())
# verify deprecated policy has been removed
for pol in input_rules.keys():
self.assertNotIn(pol, policy._ENFORCER.rules)
# verify deprecated policy was correctly translated. Iterate
# over items for compatibility with unittest2 in python 2.6
for rule in expected_rules:
self.assertIn(rule, policy._ENFORCER.rules)
self.assertEqual(str(policy._ENFORCER.rules[rule]),
expected_rules[rule])
def test_set_rules_with_deprecated_view_policy(self):
self._test_set_rules_with_deprecated_policy(
{'extension:router:view': 'rule:admin_or_owner'},
{'get_network:router:external': 'rule:admin_or_owner'})
def test_set_rules_with_deprecated_set_policy(self):
expected_policies = ['create_network:provider:network_type',
'create_network:provider:physical_network',
'create_network:provider:segmentation_id',
'update_network:provider:network_type',
'update_network:provider:physical_network',
'update_network:provider:segmentation_id']
self._test_set_rules_with_deprecated_policy(
{'extension:provider_network:set': 'rule:admin_only'},
dict((policy, 'rule:admin_only') for policy in
expected_policies))
def test_process_rules(self):
action = "create_" + FAKE_RESOURCE_NAME
# Construct RuleChecks for an action, attribute and subattribute
match_rule = common_policy.RuleCheck('rule', action)
attr_rule = common_policy.RuleCheck('rule', '%s:%ss' %
(action,
FAKE_RESOURCE_NAME))
sub_attr_rules = [common_policy.RuleCheck('rule', '%s:%s:%s' %
(action, 'attr',
'sub_attr_1'))]
# Build an AndCheck from the given RuleChecks
# Make the checks nested to better check the recursion
sub_attr_rules = common_policy.AndCheck(sub_attr_rules)
attr_rule = common_policy.AndCheck(
[attr_rule, sub_attr_rules])
match_rule = common_policy.AndCheck([match_rule, attr_rule])
# Assert that the rules are correctly extracted from the match_rule
rules = policy._process_rules_list([], match_rule)
self.assertEqual(['create_fake_resource',
'create_fake_resource:fake_resources',
'create_fake_resource:attr:sub_attr_1'], rules)
def test_log_rule_list(self):
with contextlib.nested(
mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True),
mock.patch.object(policy.LOG, 'debug')
) as (is_e, dbg):
policy.log_rule_list(common_policy.RuleCheck('rule', 'create_'))
self.assertTrue(is_e.called)
self.assertTrue(dbg.called)
|
alexandrucoman/vbox-neutron-agent
|
neutron/tests/unit/test_policy.py
|
Python
|
apache-2.0
| 28,958
|
[
"BLAST"
] |
812fb7a3556be763b5da236e5d051087eb78853232f11358f9bb09a7e12268fb
|
"""
Utility functions to mimic the template support functions for vtkVariant
"""
import vtk
_variant_type_map = {
'void' : vtk.VTK_VOID,
'char' : vtk.VTK_CHAR,
'unsigned char' : vtk.VTK_UNSIGNED_CHAR,
'signed char' : vtk.VTK_SIGNED_CHAR,
'short' : vtk.VTK_SHORT,
'unsigned short' : vtk.VTK_UNSIGNED_SHORT,
'int' : vtk.VTK_INT,
'unsigned int' : vtk.VTK_UNSIGNED_INT,
'long' : vtk.VTK_LONG,
'unsigned long' : vtk.VTK_UNSIGNED_LONG,
'long long' : vtk.VTK_LONG_LONG,
'unsigned long long' : vtk.VTK_UNSIGNED_LONG_LONG,
'__int64' : vtk.VTK___INT64,
'unsigned __int64' : vtk.VTK_UNSIGNED___INT64,
'float' : vtk.VTK_FLOAT,
'double' : vtk.VTK_DOUBLE,
'string' : vtk.VTK_STRING,
'unicode string' : vtk.VTK_UNICODE_STRING,
'vtkObjectBase' : vtk.VTK_OBJECT,
'vtkObject' : vtk.VTK_OBJECT,
}
_variant_method_map = {
vtk.VTK_VOID : '',
vtk.VTK_CHAR : 'ToChar',
vtk.VTK_UNSIGNED_CHAR : 'ToUnsignedChar',
vtk.VTK_SIGNED_CHAR : 'ToSignedChar',
vtk.VTK_SHORT : 'ToShort',
vtk.VTK_UNSIGNED_SHORT : 'ToUnsignedShort',
vtk.VTK_INT : 'ToInt',
vtk.VTK_UNSIGNED_INT : 'ToUnsignedInt',
vtk.VTK_LONG : 'ToLong',
vtk.VTK_UNSIGNED_LONG : 'ToUnsignedLong',
vtk.VTK_LONG_LONG : 'ToLongLong',
vtk.VTK_UNSIGNED_LONG_LONG : 'ToUnsignedLongLong',
vtk.VTK___INT64 : 'To__Int64',
vtk.VTK_UNSIGNED___INT64 : 'ToUnsigned__Int64',
vtk.VTK_FLOAT : 'ToFloat',
vtk.VTK_DOUBLE : 'ToDouble',
vtk.VTK_STRING : 'ToString',
vtk.VTK_UNICODE_STRING : 'ToUnicodeString',
vtk.VTK_OBJECT : 'ToVTKObject',
}
_variant_check_map = {
vtk.VTK_VOID : 'IsValid',
vtk.VTK_CHAR : 'IsChar',
vtk.VTK_UNSIGNED_CHAR : 'IsUnsignedChar',
vtk.VTK_SIGNED_CHAR : 'IsSignedChar',
vtk.VTK_SHORT : 'IsShort',
vtk.VTK_UNSIGNED_SHORT : 'IsUnsignedShort',
vtk.VTK_INT : 'IsInt',
vtk.VTK_UNSIGNED_INT : 'IsUnsignedInt',
vtk.VTK_LONG : 'IsLong',
vtk.VTK_UNSIGNED_LONG : 'IsUnsignedLong',
vtk.VTK_LONG_LONG : 'IsLongLong',
vtk.VTK_UNSIGNED_LONG_LONG : 'IsUnsignedLongLong',
vtk.VTK___INT64 : 'Is__Int64',
vtk.VTK_UNSIGNED___INT64 : 'IsUnsigned__Int64',
vtk.VTK_FLOAT : 'IsFloat',
vtk.VTK_DOUBLE : 'IsDouble',
vtk.VTK_STRING : 'IsString',
vtk.VTK_UNICODE_STRING : 'IsUnicodeString',
vtk.VTK_OBJECT : 'IsVTKObject',
}
def vtkVariantCreate(v, t):
"""
Create a vtkVariant of the specified type, where the type is in the
following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type.
"""
if not issubclass(type(t), int):
t = _variant_type_map[t]
return vtk.vtkVariant(v, t)
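# Illustrative sketch (not part of the original module): the type argument may
# be either a name from _variant_type_map or a VTK type constant, e.g.
#
#   v1 = vtkVariantCreate(10, 'unsigned int')
#   v2 = vtkVariantCreate(u'hello', vtk.VTK_UNICODE_STRING)
#   # expected: v1.IsUnsignedInt() and v2.IsUnicodeString() both return True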
def vtkVariantExtract(v, t=None):
"""
Extract the specified value type from the vtkVariant, where the type is
in the following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type. Set the type to None to
extract the value in its native type.
"""
v = vtk.vtkVariant(v)
if t is None:
t = v.GetType()
elif not issubclass(type(t), int):
t = _variant_type_map[t]
if getattr(v, _variant_check_map[t])():
return getattr(v, _variant_method_map[t])()
else:
return None
def vtkVariantCast(v, t):
"""
Cast the vtkVariant to the specified value type, where the type is
in the following format: 'int', 'unsigned int', etc. for numeric types,
and 'string' or 'unicode string' for strings. You can also use an
integer VTK type constant for the type.
"""
if not issubclass(type(t), int):
t = _variant_type_map[t]
v = vtk.vtkVariant(v, t)
if v.IsValid():
return getattr(v, _variant_method_map[t])()
else:
return None
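# Illustrative sketch (not part of the original module): vtkVariantCast()
# returns the converted value when the conversion succeeds and None otherwise.
#
#   vtkVariantCast('10', 'int')            # expected: 10
#   vtkVariantCast('not a number', 'int')  # expected: None (invalid cast)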
def vtkVariantStrictWeakOrder(s1, s2):
"""
Compare variants by type first, and then by value. The return values
are -1, 0, 1 like the built-in cmp() function, for compatibility with the
python list sort() method. This is in contrast with the C++ version,
which returns true or false.
"""
s1 = vtk.vtkVariant(s1)
s2 = vtk.vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return cmp(t1,t2)
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return 0
elif v1 != v2:
return cmp(v1,v2)
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
# compare vtk objects by classname
if t1 == vtk.VTK_OBJECT:
return cmp(r1.GetClassName(), r2.GetClassName())
return cmp(r1, r2)
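# Illustrative sketch (not part of the original module): since this comparator
# follows the cmp() convention, it can be passed straight to list.sort() on
# Python 2 (which the cmp() calls above already assume).
#
#   values = [vtk.vtkVariant(3.5), vtk.vtkVariant(1), vtk.vtkVariant('abc')]
#   values.sort(vtkVariantStrictWeakOrder)  # ordered by type first, then value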
def vtkVariantStrictEquality(s1, s2):
"""
Check two variants for strict equality of type and value.
"""
s1 = vtk.vtkVariant(s1)
s2 = vtk.vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return False
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return True
elif v1 != v2:
return False
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
return (r1 == r2)
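# Illustrative sketch (not part of the original module): strict equality also
# requires the declared types to match, which follows directly from the type
# check above.
#
#   a = vtkVariantCreate(1, 'int')
#   b = vtkVariantCreate(1, 'double')
#   vtkVariantStrictEquality(a, b)                           # expected: False
#   vtkVariantStrictEquality(a, vtkVariantCreate(1, 'int'))  # expected: True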
def vtkVariantLessThan(s1, s2):
"""
Return true if s1 < s2. This isn't very useful in Python.
"""
return (vtk.vtkVariant(s1) < vtk.vtkVariant(s2))
def vtkVariantEqual(s1, s2):
"""
Return true if s1 == s2. This isn't very useful in Python.
"""
return (vtk.vtkVariant(s1) == vtk.vtkVariant(s2))
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Wrapping/Python/vtk/util/vtkVariant.py
|
Python
|
mit
| 6,022
|
[
"VTK"
] |
d220b9d728b792a734e1153aee5d755a4caa79c779b7a5a96fadc2c81da9635c
|
import threading
import time
import sys
import hashlib
import requests
unuseful_codes = [404]
strict_codes = [100, 200, 300, 301, 302, 401, 403, 405, 500]
class Visitor(threading.Thread):
auth = None
banned_location = None
banned_md5 = None
cookies = None
delay = None
discriminator = None
headers = {}
is_allow_redirects = False
persist = False
proxy = None
requests = ""
size_discriminator = []
user_agent = None
killed = False
@staticmethod
def set_headers(headers):
Visitor.headers = headers
@staticmethod
def allow_redirects(pref):
Visitor.is_allow_redirects = pref
@staticmethod
def set_discriminator(discriminator):
Visitor.discriminator = discriminator
@staticmethod
def set_cookies(_cookies):
Visitor.cookies = _cookies
@staticmethod
def kill():
Visitor.killed = True
@staticmethod
def set_size_discriminator(size_discriminator):
if size_discriminator:
Visitor.size_discriminator = [int(x) for x in size_discriminator.split(",")]
else:
Visitor.size_discriminator = []
@staticmethod
def set_banned_location(banned_location):
Visitor.banned_location = banned_location
@staticmethod
def set_banned_md5(banned_md5):
Visitor.banned_md5 = banned_md5
@staticmethod
def set_user_agent(useragent):
Visitor.user_agent = useragent
@staticmethod
def set_proxy(proxy):
Visitor.proxy = proxy
@staticmethod
def set_delay(delay):
Visitor.delay = float(delay)
@staticmethod
def set_requests(type_request):
Visitor.requests = type_request
@staticmethod
def set_authentication(auth):
Visitor.auth = tuple(auth.split(":")) if auth else auth
@staticmethod
def set_persist(persist):
Visitor.persist = persist
def __init__(self, visitor_id, payload, results, lock):
threading.Thread.__init__(self)
self.visitor_id = visitor_id
self.payload = payload
self.results = results
self.session = None
self.lock = lock
def run(self):
try:
while not Visitor.killed:
task = self.payload.get()
if not task:
return
self.visit(task)
self.payload.task_done()
except Exception:
pass
def visit(self, task):
def _dumb_redirect(url):
origin = "{0}{1}".format(task.target, task.resource)
# Detect a redirect to the same page but ending with a slash
if url == origin:
return True
if url == origin + "/":
return True
# Detect redirect to root
if url == task.target:
return True
return False
try:
if Visitor.user_agent:
Visitor.headers["User-Agent"] = Visitor.user_agent
# Persistent connections
if Visitor.persist:
if not self.session:
self.session = requests.Session()
else:
self.session = requests
r = None
if Visitor.proxy:
if Visitor.requests == "GET":
r = self.session.get(
task.get_complete_target(),
headers=Visitor.headers,
proxies=Visitor.proxy,
verify=False,
auth=Visitor.auth,
cookies=Visitor.cookies,
allow_redirects=Visitor.is_allow_redirects,
)
elif Visitor.requests == "HEAD":
r = self.session.head(
task.get_complete_target(),
headers=Visitor.headers,
proxies=Visitor.proxy,
verify=False,
auth=Visitor.auth,
cookies=Visitor.cookies,
allow_redirects=Visitor.is_allow_redirects,
)
else:
if Visitor.requests == "GET":
r = self.session.get(
task.get_complete_target(),
headers=Visitor.headers,
verify=False,
auth=Visitor.auth,
cookies=Visitor.cookies,
allow_redirects=Visitor.is_allow_redirects,
)
elif Visitor.requests == "HEAD":
r = self.session.head(
task.get_complete_target(),
headers=Visitor.headers,
verify=False,
auth=Visitor.auth,
cookies=Visitor.cookies,
allow_redirects=Visitor.is_allow_redirects,
)
tmp_content = r.content
task.response_size = len(tmp_content)
task.response_time = round(r.elapsed.microseconds / 1000, 2)
task.set_response_code(r.status_code)
# If the discriminator string is found, mark the task as ignorable
if sys.version_info[0] >= 3:
tmp_content = tmp_content.decode("Latin-1")
if Visitor.discriminator and Visitor.discriminator in tmp_content:
task.ignorable = True
# hash the response body so the digest check also works on Python 3,
# where tmp_content has been decoded to text above
if (
    Visitor.banned_md5
    and hashlib.md5(tmp_content.encode("Latin-1") if sys.version_info[0] >= 3 else tmp_content).hexdigest() == Visitor.banned_md5
):
    task.ignorable = True
# Check if page size is not what we need
if task.response_size in Visitor.size_discriminator:
task.ignorable = True
# Look for interesting content
if task.content and (task.content in tmp_content):
task.content_has_detected(True)
# Look for a redirection
if Visitor.is_allow_redirects:
if len(r.history) > 0 and not _dumb_redirect(r.history[-1].url):
task.response_code = str(r.history[0].status_code)
task.location = r.history[-1].url
else:
if r.status_code >= 300 and r.status_code < 400:
task.set_response_code(404)
task.ignorable = True
if "content-type" in [h.lower() for h in r.headers.keys()]:
try:
task.response_type = r.headers["Content-Type"].split(";")[0]
except Exception:
pass
task.thread = self.visitor_id
# publish the result (and apply the optional delay) while holding the shared
# lock; taking the lock only here avoids releasing a lock that was never
# acquired when the HTTP request itself raised an exception
with self.lock:
    self.results.put(task)
    if Visitor.delay:
        time.sleep(Visitor.delay)
except (requests.ConnectionError, requests.Timeout) as e:
    # TODO log to a file instead of screen
    print("[!] Timeout/Connection error")
    print(e)
except Exception as e:
    print("[!] General exception while visiting")
    print(e)
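# Illustrative usage sketch (hypothetical queues and values): the class-level
# setters configure every worker before the threads are started, e.g.
#     Visitor.set_requests("GET")
#     Visitor.set_headers({})
#     worker = Visitor(1, payload_queue, results_queue, threading.Lock())
#     worker.start()
# where payload_queue and results_queue are standard library queue instances;
# pushing a falsy item onto payload_queue makes the worker return from run().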
|
deibit/cansina
|
core/visitor.py
|
Python
|
gpl-3.0
| 7,285
|
[
"VisIt"
] |
b14ed097cd38b1bd9e25efa8791337dfa04840b15ff8d1c22bb8035b8ea89caf
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import functools
import glob
import itertools
import jinja2
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import sys
import time
import urllib2
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.addons.base.ir.ir_qweb import AssetsBundle, QWebTemplateNotFound
from openerp.tools.translate import _
from openerp import http
from openerp.http import request, serialize_exception as _serialize_exception
_logger = logging.getLogger(__name__)
if hasattr(sys, 'frozen'):
# When running on compiled windows binary, we don't have access to package loader.
path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'views'))
loader = jinja2.FileSystemLoader(path)
else:
loader = jinja2.PackageLoader('openerp.addons.web', "views")
env = jinja2.Environment(loader=loader, autoescape=True)
env.filters["json"] = simplejson.dumps
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception, e:
_logger.exception("An exception occured during an http request")
se = _serialize_exception(e)
error = {
'code': 200,
'message': "OpenERP Server Error",
'data': se
}
return werkzeug.exceptions.InternalServerError(simplejson.dumps(error))
return wrap
def redirect_with_hash(*args, **kw):
"""
.. deprecated:: 8.0
Use the ``http.redirect_with_hash()`` function instead.
"""
return http.redirect_with_hash(*args, **kw)
def abort_and_redirect(url):
r = request.httprequest
response = werkzeug.utils.redirect(url, 302)
response = r.app.get_response(r, response, explicit_session=False)
werkzeug.exceptions.abort(response)
def ensure_db(redirect='/web/database/selector'):
# This helper should be used in web client auth="none" routes
# if those routes need a db to work with.
# If the heuristic does not find any database, the user will be
# redirected to the db selector or to any url specified by the `redirect` argument.
# If the db is taken from a query parameter, it is checked against
# `http.db_filter()` in order to ensure it is legitimate and thus avoid db
# forgery that could lead to xss attacks.
db = request.params.get('db')
# Ensure db is legit
if db and db not in http.db_filter([db]):
db = None
if db and not request.session.db:
# The user asked for a specific database on a new session.
# That means the nodb router has been used to find the route.
# Depending on the modules installed in the database, the rendering of the page
# may depend on data injected by the database route dispatcher.
# Thus, we redirect the user to the same page but with the session cookie set.
# This will force using the database route dispatcher...
r = request.httprequest
url_redirect = r.base_url
if r.query_string:
# Can't use werkzeug.wrappers.BaseRequest.url with encoded hashes:
# https://github.com/amigrave/werkzeug/commit/b4a62433f2f7678c234cdcac6247a869f90a7eb7
url_redirect += '?' + r.query_string
response = werkzeug.utils.redirect(url_redirect, 302)
request.session.db = db
abort_and_redirect(url_redirect)
# if db not provided, use the session one
if not db and request.session.db and http.db_filter([request.session.db]):
db = request.session.db
# if no database provided and no database in session, use monodb
if not db:
db = db_monodb(request.httprequest)
# if no db can be found by this point, send to the database selector
# the database selector will redirect to database manager if needed
if not db:
werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))
# always switch the session to the computed db
if db != request.session.db:
request.session.logout()
abort_and_redirect(request.httprequest.url)
request.session.db = db
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
#change: n not web module, can not be resolved, ignore
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
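# Illustrative sketch (hypothetical dependency map): dependencies come first,
# e.g. module_topological_sort({'web': [], 'web_kanban': ['web']}) returns
# ['web', 'web_kanban']; the relative order of independent modules may vary.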
def module_installed():
# Candidate modules: the current heuristic is the presence of a /static dir
loadable = http.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = request.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = request.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = http.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
ids = m.search(cr, 1, [('state','=','installed'), ('name','in', loadable)])
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
except Exception,e:
pass
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_boot(db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in http.addons_manifest:
serverside.append(i)
monodb = db or db_monodb()
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
# raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
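# Illustrative sketch (hypothetical file list): the children of each file's
# root element are collected under a single root, e.g.
#     content, checksum = concat_xml(['base.xml', 'extra.xml'])
# returns the serialized combined tree plus a sha1 digest of the raw contents.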
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
if addons is None:
addons = module_boot(db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = http.addons_manifest.get(addon, None)
if not manifest:
continue
# ensure the addons path does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
if pattern.startswith(('http://', 'https://', '//')):
if include_remotes:
r.append((None, pattern))
else:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(extension, mods=None, db=None, debug=None):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if debug is not None:
_logger.warning("openerp.addons.web.main.manifest_list(): debug parameter is deprecated")
files = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(response, last_modified=None, etag=None, max_age=0):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = max_age
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
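# Illustrative sketch mirroring the qweb/bundle routes below: wrap a response
# so clients revalidate it instead of re-downloading unchanged content, e.g.
#     response = request.make_response(content, [('Content-Type', 'text/xml')])
#     return make_conditional(response, last_modified, checksum, max_age=60*60*24)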
def login_and_redirect(db, login, key, redirect_url='/web'):
request.session.authenticate(db, login, key)
return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
return redirect
def login_redirect():
url = '/web/login?'
if request.debug:
url += 'debug&'
return """<html><head><script>
window.location = '%sredirect=' + encodeURIComponent(window.location);
</script></head></html>
""" % (url,)
def load_actions_from_ir_values(key, key2, models, meta):
Values = request.session.model('ir.values')
actions = Values.get(key, key2, models, meta, request.context)
return [(id, name, clean_action(action))
for id, name, action in actions]
def clean_action(action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
(the ``ir.actions.act_window model``), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
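# Illustrative sketch (hypothetical action dicts): {'view_mode': 'tree,form'}
# with no view_id yields action['views'] = [(False, 'tree'), (False, 'form')],
# while {'view_id': 42, 'view_mode': 'form'} yields [(42, 'form')].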
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
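# Illustrative sketch (hypothetical action dict): with view_type 'form' and
# view_mode 'tree,form', the result has view_mode 'list,form' and views
# [[False, 'list'], [False, 'form']]; an action whose view_type is 'tree' is
# returned without the tree -> list folding.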
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
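# Illustrative sketch (hypothetical element):
#     xml2json_from_elementtree(ElementTree.fromstring('<p a="1">hi</p>'))
# returns {'tag': 'p', 'attrs': {'a': '1'}, 'children': ['hi']}.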
def content_disposition(filename):
filename = filename.encode('utf8')
escaped = urllib2.quote(filename)
browser = request.httprequest.user_agent.browser
version = int((request.httprequest.user_agent.version or '0').split('.')[0])
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return "attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
class Home(http.Controller):
@http.route('/', type='http', auth="none")
def index(self, s_action=None, db=None, **kw):
return http.local_redirect('/web', query=request.params, keep_hash=True)
@http.route('/web', type='http', auth="none")
def web_client(self, s_action=None, **kw):
ensure_db()
if request.session.uid:
if kw.get('redirect'):
return werkzeug.utils.redirect(kw.get('redirect'), 303)
return request.render('web.webclient_bootstrap')
else:
return login_redirect()
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
ensure_db()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
if uid is not False:
return http.redirect_with_hash(redirect)
request.uid = old_uid
values['error'] = "Wrong login/password"
return request.render('web.login', values)
@http.route('/login', type='http', auth="none")
def login(self, db, login, key, redirect="/web", **kw):
if not http.db_filter([db]):
return werkzeug.utils.redirect('/', 303)
return login_and_redirect(db, login, key, redirect_url=redirect)
@http.route('/web/js/<xmlid>', type='http', auth="public")
def js_bundle(self, xmlid, **kw):
# manifest backward compatible mode, to be removed
values = {'manifest_list': manifest_list}
try:
assets_html = request.render(xmlid, lazy=False, qcontext=values)
except QWebTemplateNotFound:
return request.not_found()
bundle = AssetsBundle(xmlid, assets_html, debug=request.debug)
response = request.make_response(
bundle.js(), [('Content-Type', 'application/javascript')])
# TODO: check that we don't do weird lazy overriding of __call__ which break body-removal
return make_conditional(
response, bundle.last_modified, bundle.checksum, max_age=60*60*24)
@http.route('/web/css/<xmlid>', type='http', auth='public')
def css_bundle(self, xmlid, **kw):
values = {'manifest_list': manifest_list} # manifest backward compatible mode, to be removed
try:
assets_html = request.render(xmlid, lazy=False, qcontext=values)
except QWebTemplateNotFound:
return request.not_found()
bundle = AssetsBundle(xmlid, assets_html, debug=request.debug)
response = request.make_response(
bundle.css(), [('Content-Type', 'text/css')])
return make_conditional(
response, bundle.last_modified, bundle.checksum, max_age=60*60*24)
class WebClient(http.Controller):
@http.route('/web/webclient/csslist', type='json', auth="none")
def csslist(self, mods=None):
return manifest_list('css', mods=mods)
@http.route('/web/webclient/jslist', type='json', auth="none")
def jslist(self, mods=None):
return manifest_list('js', mods=mods)
@http.route('/web/webclient/qweb', type='http', auth="none")
def qweb(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
def bootstrap_translations(self, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = request.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if http.addons_manifest[addon_name].get('bootstrap'):
addons_path = http.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@http.route('/web/webclient/translations', type='json', auth="none")
def translations(self, mods=None, lang=None):
request.disable_db = False
uid = openerp.SUPERUSER_ID
if mods is None:
m = request.registry.get('ir.module.module')
mods = [x['name'] for x in m.search_read(request.cr, uid,
[('state','=','installed')], ['name'])]
if lang is None:
lang = request.context["lang"]
res_lang = request.registry.get('res.lang')
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = request.registry.get('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read(request.cr, uid, [('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@http.route('/web/webclient/version_info', type='json', auth="none")
def version_info(self):
return openerp.service.common.exp_version()
@http.route('/web/tests', type='http', auth="none")
def index(self, mod=None, **kwargs):
return request.render('web.qunit_suite')
class Proxy(http.Controller):
@http.route('/web/proxy/load', type='json', auth="none")
def load(self, path):
""" Proxies an HTTP request through a JSON request.
It is strongly recommended to not request binary files through this,
as the result will be a binary data blob as well.
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
base_url = request.httprequest.base_url
return Client(request.httprequest.app, BaseResponse).get(path, base_url=base_url).data
class Database(http.Controller):
@http.route('/web/database/selector', type='http', auth="none")
def selector(self, **kw):
try:
dbs = http.db_list()
if not dbs:
return http.local_redirect('/web/database/manager')
except openerp.exceptions.AccessDenied:
dbs = False
return env.get_template("database_selector.html").render({
'databases': dbs,
'debug': request.debug,
})
@http.route('/web/database/manager', type='http', auth="none")
def manager(self, **kw):
# TODO: migrate the webclient's database manager to server side views
request.session.logout()
return env.get_template("database_manager.html").render({
'modules': simplejson.dumps(module_boot()),
})
@http.route('/web/database/get_list', type='json', auth="none")
def get_list(self):
# TODO change js to avoid calling this method if in monodb mode
try:
return http.db_list()
except openerp.exceptions.AccessDenied:
monodb = db_monodb()
if monodb:
return [monodb]
raise
@http.route('/web/database/create', type='json', auth="none")
def create(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
db_created = request.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
if db_created:
request.session.authenticate(params['db_name'], 'admin', params['create_admin_pwd'])
return db_created
@http.route('/web/database/duplicate', type='json', auth="none")
def duplicate(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return request.session.proxy("db").duplicate_database(*duplicate_attrs)
@http.route('/web/database/drop', type='json', auth="none")
def drop(self, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if request.session.proxy("db").drop(password, db):
return True
else:
return False
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': 'Drop Database'}
except Exception:
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@http.route('/web/database/backup', type='http', auth="none")
def backup(self, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
request.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return request.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename))],
{'fileToken': token}
)
except Exception, e:
return simplejson.dumps([[],[{'error': openerp.tools.ustr(e), 'title': _('Backup Database')}]])
@http.route('/web/database/restore', type='http', auth="none")
def restore(self, db_file, restore_pwd, new_db, mode):
try:
copy = mode == 'copy'
data = base64.b64encode(db_file.read())
request.session.proxy("db").restore(restore_pwd, new_db, data, copy)
return ''
except openerp.exceptions.AccessDenied, e:
raise Exception("AccessDenied")
@http.route('/web/database/change_password', type='json', auth="none")
def change_password(self, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return request.session.proxy("db").change_admin_password(old_password, new_password)
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': _('Change Password')}
except Exception:
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(http.Controller):
def session_info(self):
request.session.ensure_valid()
return {
"session_id": request.session_id,
"uid": request.session.uid,
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"username": request.session.login,
}
@http.route('/web/session/get_session_info', type='json', auth="none")
def get_session_info(self):
request.uid = request.session.uid
request.disable_db = False
return self.session_info()
@http.route('/web/session/authenticate', type='json', auth="none")
def authenticate(self, db, login, password, base_location=None):
request.session.authenticate(db, login, password)
return self.session_info()
@http.route('/web/session/change_password', type='json', auth="user")
def change_password(self, fields):
old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
if new_password != confirm_password:
return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
try:
if request.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@http.route('/web/session/get_lang_list', type='json', auth="none")
def get_lang_list(self):
try:
return request.session.proxy("db").list_lang() or []
except Exception, e:
return {"error": e, "title": _("Languages")}
@http.route('/web/session/modules', type='json', auth="user")
def modules(self):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed()
@http.route('/web/session/save_session_action', type='json', auth="user")
def save_session_action(self, the_action):
"""
This method stores an action object in the session and returns an integer
identifying that action. The method get_session_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
return request.httpsession.save_action(the_action)
@http.route('/web/session/get_session_action', type='json', auth="user")
def get_session_action(self, key):
"""
Gets back a previously saved action. This method can return None if the action
was saved too long ago (this case should be handled gracefully).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
return request.httpsession.get_action(key)
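# Illustrative sketch: a client posts an action dict to
# /web/session/save_session_action, keeps the returned integer key, and later
# retrieves the same action through /web/session/get_session_action with that
# key (None once the stored action has expired).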
@http.route('/web/session/check', type='json', auth="user")
def check(self):
request.session.assert_valid()
return None
@http.route('/web/session/destroy', type='json', auth="user")
def destroy(self):
request.session.logout()
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True)
return werkzeug.utils.redirect(redirect, 303)
class Menu(http.Controller):
@http.route('/web/menu/get_user_roots', type='json', auth="user")
def get_user_roots(self):
""" Return all root menu ids visible for the session user.
:return: the root menu ids
:rtype: list(int)
"""
s = request.session
Menus = s.model('ir.ui.menu')
menu_domain = [('parent_id', '=', False)]
return Menus.search(menu_domain, 0, False, False, request.context)
@http.route('/web/menu/load', type='json', auth="user")
def load(self):
""" Loads all menu items (all applications and their sub-menus).
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
Menus = request.session.model('ir.ui.menu')
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots()
menu_roots = Menus.read(menu_root_ids, fields, request.context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
# menus are loaded fully, unlike a regular tree view, because there is a
# limited number of items (752 when all 6.1 addons are installed)
menu_ids = Menus.search([('id', 'child_of', menu_root_ids)], 0, False, False, request.context)
menu_items = Menus.read(menu_ids, fields, request.context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
# sort by sequence a tree using parent_id
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
@http.route('/web/menu/load_needaction', type='json', auth="user")
def load_needaction(self, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return request.session.model('ir.ui.menu').get_needaction_data(menu_ids, request.context)
class DataSet(http.Controller):
@http.route('/web/dataset/search_read', type='json', auth="user")
def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(model, fields, offset, limit, domain, sort)
def do_search_read(self, model, fields=False, offset=0, limit=False, domain=None
, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
:returns: A structure (dict) with two keys: ids (all the ids matching
the (domain, context) pair) and records (paginated records
matching fields selection set)
:rtype: list
"""
Model = request.session.model(model)
records = Model.search_read(domain, fields, offset or 0, limit or False, sort or False,
request.context)
if not records:
return {
'length': 0,
'records': []
}
if limit and len(records) == limit:
length = Model.search_count(domain, request.context)
else:
length = len(records) + (offset or 0)
return {
'length': length,
'records': records
}
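# Illustrative sketch (hypothetical model and domain):
#     self.do_search_read('res.partner', fields=['name'], offset=0, limit=80,
#                         domain=[('customer', '=', True)])
# returns {'length': <total number of matches>, 'records': [...up to 80 dicts...]}
# so the client can render one page while still knowing the full result size.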
@http.route('/web/dataset/load', type='json', auth="user")
def load(self, model, id, fields):
m = request.session.model(model)
value = {}
r = m.read([id], False, request.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
def _call_kw(self, model, method, args, kwargs):
# Temporarily implements the future display_name special field for model#read()
if method in ('read', 'search_read') and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
if method == 'read':
names = dict(request.session.model(model).name_get(args[0], **kwargs))
else:
names = dict(request.session.model(model).name_search('', args[0], **kwargs))
args[1].remove('display_name')
records = getattr(request.session.model(model), method)(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "{0}#{1}".format(model, (record['id']))
return records
if method.startswith('_'):
raise Exception("Access Denied: Underscore prefixed methods cannot be remotely called")
return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
@http.route('/web/dataset/call', type='json', auth="user")
def call(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
@http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
def call_kw(self, model, method, args, kwargs, path=None):
return self._call_kw(model, method, args, kwargs)
@http.route('/web/dataset/call_button', type='json', auth="user")
def call_button(self, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(action)
return False
@http.route('/web/dataset/exec_workflow', type='json', auth="user")
def exec_workflow(self, model, id, signal):
return request.session.exec_workflow(model, id, signal)
@http.route('/web/dataset/resequence', type='json', auth="user")
def resequence(self, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
The re-sequencing starts at the first record of ``ids``; the sequence
number is incremented by one after each record and starts at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = request.session.model(model)
if not m.fields_get([field]):
return False
# python 2.6 has no start parameter
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
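# Illustrative sketch (hypothetical ids): resequence('ir.ui.menu', [12, 7, 9],
# offset=10) writes sequence 10, 11 and 12 to records 12, 7 and 9 respectively.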
class View(http.Controller):
@http.route('/web/view/add_custom', type='json', auth="user")
def add_custom(self, view_id, arch):
CustomView = request.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return {'result': True}
@http.route('/web/view/undo_custom', type='json', auth="user")
def undo_custom(self, view_id, reset=False):
CustomView = request.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', request.session.uid), ('ref_id' ,'=', view_id)],
0, False, False, request.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, request.context)
else:
CustomView.unlink([vcustom[0]], request.context)
return {'result': True}
return {'result': False}
class TreeView(View):
@http.route('/web/treeview/action', type='json', auth="user")
def action(self, model, id):
return load_actions_from_ir_values(
'action', 'tree_but_open',[(model, id)],
False)
class Binary(http.Controller):
@http.route('/web/binary/image', type='http', auth="user")
def image(self, model, id, field, **kw):
last_update = '__last_update'
Model = request.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = request.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(request.session_id).hexdigest()
retag = hashed_session
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
try:
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], request.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
if not id:
res = Model.default_get([field], request.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], request.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except Exception:
image_data = self.placeholder()
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
except:
pass
return request.make_response(image_data, headers)
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@http.route('/web/binary/saveas', type='http', auth="user")
@serialize_exception
def saveas(self, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read([int(id)], fields, request.context)[0]
else:
res = Model.default_get(fields, request.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return request.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
@http.route('/web/binary/saveas_ajax', type='http', auth="user")
@serialize_exception
def saveas_ajax(self, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = { field: data }
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))],
cookies={'fileToken': token})
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
Model = request.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except Exception:
args = {'error': "Something horrible happened"}
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none")
def company_logo(self, dbname=None, **kw):
# TODO add etag, refactor to use /image code for etag
uid = None
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
image_data = self.placeholder('logo.png')
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = str(row[0]).decode('base64')
else:
image_data = self.placeholder('nologo.png')
except Exception:
image_data = self.placeholder('logo.png')
headers = [
('Content-Type', 'image/png'),
('Content-Length', len(image_data)),
]
return request.make_response(image_data, headers)
class Action(http.Controller):
@http.route('/web/action/load', type='json', auth="user")
def load(self, action_id, do_not_eval=False):
Actions = request.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = request.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], request.context)
if base_action:
ctx = {}
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
ctx.update(request.context)
action = request.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(action[0])
return value
@http.route('/web/action/run', type='json', auth="user")
def run(self, action_id):
return_action = request.session.model('ir.actions.server').run(
[action_id], request.context)
if return_action:
return clean_action(return_action)
else:
return False
class Export(http.Controller):
@http.route('/web/export/formats', type='json', auth="user")
def formats(self):
""" Returns all valid export formats
:returns: for each export format, a pair of identifier and printable name
:rtype: [(str, str)]
"""
return [
{'tag': 'csv', 'label': 'CSV'},
{'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT required"},
]
def fields_get(self, model):
Model = request.session.model(model)
fields = Model.fields_get(False, request.context)
return fields
@http.route('/web/export/get_fields', type='json', auth="user")
def get_fields(self, model, prefix='', parent_name= '',
import_compat=True, parent_field_type=None,
exclude=None):
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: field[1].get('string', ''))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
id = prefix + (prefix and '/'or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
@http.route('/web/export/namelist', type='json', auth="user")
def namelist(self, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = request.session.model("ir.exports").read([export_id])[0]
export_fields_list = request.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, model, export_fields):
info = {}
fields = self.fields_get(model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
# * then, group on: the first field of the path (which is the same for
#   a field and for its subfields) and the length of splitting on the
# first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
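# Illustrative sketch (hypothetical export list): for
#     ['name', 'company_id/name', 'company_id/currency_id']
# the two company_id/* entries are grouped so that a single fields_get is
# issued on the related model, producing labels like {'name': 'Name',
# 'company_id/name': 'Company/Name', 'company_id/currency_id': 'Company/Currency'}.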
def graft_subfields(self, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(model, export_fields).iteritems())
class ExportFormat(object):
raw_data = False
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
:params list fields: a list of fields to export
:params list rows: a list of records to export
:returns:
:rtype: bytes
"""
raise NotImplementedError()
def base(self, data, token):
params = simplejson.loads(data)
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
params)
Model = request.session.model(model)
context = dict(request.context or {}, **params.get('context', {}))
ids = ids or Model.search(domain, 0, False, False, context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, self.raw_data, context=context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return request.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model))),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
@http.route('/web/export/csv', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
d = d.replace('\n',' ').replace('\t',' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
if d is False: d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
# Excel needs raw data to correctly handle numbers and date values
raw_data = True
@http.route('/web/export/xls', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
base_style = xlwt.easyxf('align: wrap yes')
date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
cell_style = base_style
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
elif isinstance(cell_value, datetime.datetime):
cell_style = datetime_style
elif isinstance(cell_value, datetime.date):
cell_style = date_style
worksheet.write(row_index + 1, cell_index, cell_value, cell_style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Reports(http.Controller):
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@http.route('/web/report', type='http', auth="user")
@serialize_exception
def index(self, action, token):
action = simplejson.loads(action)
report_srv = request.session.proxy("report")
context = dict(request.context)
context.update(action["context"])
report_data = {}
report_ids = context.get("active_ids", None)
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
request.session.db, request.session.uid, request.session.password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
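        # poll the report service until rendering has finished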
while True:
report_struct = report_srv.report_get(
request.session.db, request.session.uid, request.session.password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action.get('name', 'report')
if 'name' not in action:
reports = request.session.model('ir.actions.report.xml')
res_id = reports.search([('report_name', '=', action['report_name']),],
0, False, False, context)
if len(res_id) > 0:
file_name = reports.read(res_id[0], ['name'], context)['name']
else:
file_name = action['report_name']
file_name = '%s.%s' % (file_name, report_struct['format'])
return request.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
class Apps(http.Controller):
@http.route('/apps/<app>', auth='user')
def get_app_url(self, req, app):
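        # Open the Apps (module tree) action directly on the requested module's
        # form view by saving a session action and redirecting to the web client.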
act_window_obj = request.session.model('ir.actions.act_window')
ir_model_data = request.session.model('ir.model.data')
try:
action_id = ir_model_data.get_object_reference('base', 'open_module_tree')[1]
action = act_window_obj.read(action_id, ['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])
action['target'] = 'current'
except ValueError:
action = False
try:
app_id = ir_model_data.get_object_reference('base', 'module_%s' % app)[1]
except ValueError:
app_id = False
if action and app_id:
action['res_id'] = app_id
action['view_mode'] = 'form'
action['views'] = [(False, u'form')]
sakey = Session().save_session_action(action)
debug = '?debug' if req.debug else ''
return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
dkodnik/arp
|
addons/web/controllers/main.py
|
Python
|
agpl-3.0
| 69,202
|
[
"VisIt"
] |
4669968e882b1204aca877bebbb82e44d034580ef434c3b0ec06cf787494517d
|
# Copyright (C) 2013 by Ben Morris (ben@bendmorris.com)
# based on code by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to CDAO trees.
See classes in `Bio.Nexus`: Trees.Tree, Trees.NodeData, and Nodes.Chain.
"""
from Bio.Phylo import BaseTree
class Tree(BaseTree.Tree):
"""CDAO Tree object."""
def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
BaseTree.Tree.__init__(self, root=root or Clade(),
rooted=rooted, id=id, name=name)
self.weight = weight
# a list of (predicate, object) pairs, containing additional triples
# using this tree as subject
self.attributes = []
class Clade(BaseTree.Clade):
"""CDAO Clade (sub-tree) object."""
def __init__(self, branch_length=1.0, name=None, clades=None,
confidence=None, comment=None):
BaseTree.Clade.__init__(self, branch_length=branch_length,
name=name, clades=clades, confidence=confidence)
self.comment = comment
# a list of (predicate, object) pairs, containing additional triples
# using this clade as subject
self.attributes = []
self.tu_attributes = []
self.edge_attributes = []
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/CDAO.py
|
Python
|
mit
| 1,443
|
[
"Biopython"
] |
8463c8288bdd3c2360461a6951b5acc8b9daea75a25175ae204d1f0d2afa080d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides plotting capabilities for battery related applications.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 12, 2012"
from collections import OrderedDict
from pymatgen.util.plotting import pretty_plot
class VoltageProfilePlotter:
"""
A plotter to make voltage profile plots for batteries.
Args:
xaxis: The quantity to use as the xaxis. Can be either capacity (the
default), or the frac_x.
"""
def __init__(self, xaxis="capacity"):
self._electrodes = OrderedDict()
self.xaxis = xaxis
def add_electrode(self, electrode, label=None):
"""
Add an electrode to the plot.
Args:
electrode: An electrode. All electrodes satisfying the
AbstractElectrode interface should work.
label: A label for the electrode. If None, defaults to a counting
system, i.e. 'Electrode 1', 'Electrode 2', ...
"""
if not label:
label = "Electrode {}".format(len(self._electrodes) + 1)
self._electrodes[label] = electrode
def get_plot_data(self, electrode):
x = []
y = []
cap = 0
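        # norm rescales the frac_x axis so the most-discharged composition maps to x = 1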
most_discharged = electrode[-1].frac_discharge
norm = most_discharged / (1 - most_discharged)
for vpair in electrode:
if self.xaxis == "capacity":
x.append(cap)
cap += vpair.mAh / electrode.normalization_mass
x.append(cap)
else:
x.append(vpair.frac_charge / (1 - vpair.frac_charge) / norm)
x.append(vpair.frac_discharge / (1 - vpair.frac_discharge)
/ norm)
y.extend([vpair.voltage] * 2)
x.append(x[-1])
y.append(0)
return x, y
def get_plot(self, width=8, height=8):
"""
Returns a plot object.
Args:
width: Width of the plot. Defaults to 8 in.
            height: Height of the plot. Defaults to 8 in.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(width, height)
for label, electrode in self._electrodes.items():
(x, y) = self.get_plot_data(electrode)
plt.plot(x, y, '-', linewidth=2, label=label)
plt.legend()
if self.xaxis == "capacity":
plt.xlabel('Capacity (mAh/g)')
else:
plt.xlabel('Fraction')
plt.ylabel('Voltage (V)')
plt.tight_layout()
return plt
def show(self, width=8, height=6):
"""
Show the voltage profile plot.
Args:
width: Width of the plot. Defaults to 8 in.
height: Height of the plot. Defaults to 6 in.
"""
self.get_plot(width, height).show()
def save(self, filename, image_format="eps", width=8, height=6):
"""
Save the plot to an image file.
Args:
filename: Filename to save to.
image_format: Format to save to. Defaults to eps.
"""
self.get_plot(width, height).savefig(filename, format=image_format)
|
dongsenfo/pymatgen
|
pymatgen/apps/battery/plotter.py
|
Python
|
mit
| 3,385
|
[
"pymatgen"
] |
1ce1aa60b9991d7ac99fcb533282e96d105d7b2395b3483ec4fbbe003affcd92
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import json
import os
import tempfile
from six import binary_type
from .common import BaseTest, ACCOUNT_ID, Bag, TestConfig as Config
from .test_s3 import destroyBucket
from c7n.resolver import ValuesFrom, URIResolver
class FakeCache(object):
def __init__(self):
self.state = {}
def get(self, key):
return self.state.get(key)
def save(self, key, data):
self.state[key] = data
class FakeResolver(object):
def __init__(self, contents):
if isinstance(contents, binary_type):
contents = contents.decode("utf8")
self.contents = contents
def resolve(self, uri):
return self.contents
class ResolverTest(BaseTest):
def test_resolve_s3(self):
session_factory = self.replay_flight_data("test_s3_resolver")
session = session_factory()
client = session.client("s3")
resource = session.resource("s3")
bname = "custodian-byebye"
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
key = resource.Object(bname, "resource.json")
content = json.dumps({"moose": {"soup": "duck"}})
key.put(
Body=content, ContentLength=len(content), ContentType="application/json"
)
cache = FakeCache()
resolver = URIResolver(session_factory, cache)
uri = "s3://%s/resource.json?RequestPayer=requestor" % bname
data = resolver.resolve(uri)
self.assertEqual(content, data)
self.assertEqual(list(cache.state.keys()), [("uri-resolver", uri)])
def test_resolve_file(self):
content = json.dumps({"universe": {"galaxy": {"system": "sun"}}})
cache = FakeCache()
resolver = URIResolver(None, cache)
with tempfile.NamedTemporaryFile(mode="w+", dir=os.getcwd(), delete=False) as fh:
self.addCleanup(os.unlink, fh.name)
fh.write(content)
fh.flush()
self.assertEqual(resolver.resolve("file:%s" % fh.name), content)
class UrlValueTest(BaseTest):
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(tempfile.gettempdir())
def tearDown(self):
os.chdir(self.old_dir)
def get_values_from(self, data, content):
config = Config.empty(account_id=ACCOUNT_ID)
mgr = Bag({"session_factory": None, "_cache": None, "config": config})
values = ValuesFrom(data, mgr)
values.resolver = FakeResolver(content)
return values
def test_json_expr(self):
values = self.get_values_from(
{"url": "moon", "expr": "[].bean", "format": "json"},
json.dumps([{"bean": "magic"}]),
)
self.assertEqual(values.get_values(), ["magic"])
def test_invalid_format(self):
values = self.get_values_from({"url": "mars"}, "")
self.assertRaises(ValueError, values.get_values)
def test_txt(self):
with open("resolver_test.txt", "w") as out:
for i in ["a", "b", "c", "d"]:
out.write("%s\n" % i)
with open("resolver_test.txt", "rb") as out:
values = self.get_values_from({"url": "letters.txt"}, out.read())
os.remove("resolver_test.txt")
self.assertEqual(values.get_values(), ["a", "b", "c", "d"])
def test_csv_expr(self):
with open("test_expr.csv", "w") as out:
writer = csv.writer(out)
writer.writerows([range(5) for r in range(5)])
with open("test_expr.csv", "rb") as out:
values = self.get_values_from(
{"url": "sun.csv", "expr": "[*][2]"}, out.read()
)
os.remove("test_expr.csv")
self.assertEqual(values.get_values(), ["2", "2", "2", "2", "2"])
def test_csv_expr_using_dict(self):
with open("test_dict.csv", "w") as out:
writer = csv.writer(out)
writer.writerow(["aa", "bb", "cc", "dd", "ee"]) # header row
writer.writerows([range(5) for r in range(5)])
with open("test_dict.csv", "rb") as out:
values = self.get_values_from(
{"url": "sun.csv", "expr": "bb[1]", "format": "csv2dict"}, out.read()
)
os.remove("test_dict.csv")
self.assertEqual(values.get_values(), "1")
def test_csv_column(self):
with open("test_column.csv", "w") as out:
writer = csv.writer(out)
writer.writerows([range(5) for r in range(5)])
with open("test_column.csv", "rb") as out:
values = self.get_values_from({"url": "sun.csv", "expr": 1}, out.read())
os.remove("test_column.csv")
self.assertEqual(values.get_values(), ["1", "1", "1", "1", "1"])
def test_csv_raw(self):
with open("test_raw.csv", "w") as out:
writer = csv.writer(out)
writer.writerows([range(3, 4) for r in range(5)])
with open("test_raw.csv", "rb") as out:
values = self.get_values_from({"url": "sun.csv"}, out.read())
os.remove("test_raw.csv")
self.assertEqual(values.get_values(), [["3"], ["3"], ["3"], ["3"], ["3"]])
def test_value_from_vars(self):
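        # url and expr are format()-expanded with config variables such as account_id and region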
values = self.get_values_from(
{"url": "{account_id}", "expr": '["{region}"][]', "format": "json"},
json.dumps({"us-east-1": "east-resource"}),
)
self.assertEqual(values.get_values(), ["east-resource"])
self.assertEqual(values.data.get("url", ""), ACCOUNT_ID)
|
ewbankkit/cloud-custodian
|
tests/test_resolver.py
|
Python
|
apache-2.0
| 6,165
|
[
"Galaxy",
"MOOSE"
] |
370085f1a61bbebe38d4f99497215c30e883f24953f7ef3a90efdaa2ecad2a94
|
# standard modules
import os
import re
# custom modules
import Reader
class Parser(object):
def __init__(self):
self._readers = dict()
self.path = None
self.runmatch = dict()
self._runcache = None
def get_atom_indices(self, selector):
"""
:param selector: Valid selection string.
:return: List of 0-based atom indices.
"""
u = self._readers.itervalues().next().get_universe()
if isinstance(u, Reader.EmptyUniverse):
return []
ag = u.select_atoms(selector)
return [_.index for _ in ag]
def get_runs(self):
""" Discovers all available runs in this bucket.
:return: Dict of run names available in this bucket. Keys: paths, values: names.
"""
if self._runcache is not None:
return self._runcache
# regular runs
inodes = os.listdir(self.path)
directories = [_ for _ in inodes if os.path.isdir(os.path.join(self.path, _))]
runs = {_: _ for _ in directories if _.startswith('run-')}
# alternative run directories
for root, dirs, files in os.walk(self.path):
relpath = os.path.relpath(root, self.path)
for regex, replace in self.runmatch.iteritems():
g = re.match(regex, relpath)
if g is not None:
runs[relpath] = replace.format(**g.groupdict())
self._runcache = runs
return self._runcache
def get_universe(self, run):
return self._readers[run].get_universe()
def get_input(self, run):
return self._readers[run].get_input()
def get_output(self, run, alias):
o = self._readers[run].get_output()
o['run'] = alias
return o
def get_groups(self, run, groups):
u = self.get_universe(run)
if isinstance(u, Reader.EmptyUniverse):
return {key: [] for (key, value) in groups.iteritems()}
return {key: u.atoms[value] for (key, value) in groups.iteritems()}
def get_trajectory_frames(self, run):
return self._readers[run].get_trajectory_frames()
def get_run_code(self, runpath, topologyfiles, configfiles, logfiles):
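		""" Determines which reader implementation claims the given run directory.
		:return: Label of the claiming reader ('cp2k' or 'namd'), or None if no reader claims it.
		"""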
readers = {'cp2k': Reader.CP2KReader, 'namd': Reader.NAMDReader}
for label, reader in readers.iteritems():
r = reader(runpath)
if 'inputnames' in r.get_options():
r.inputnames = configfiles + r.inputnames
if 'topologies' in r.get_options():
r.topologies = topologyfiles + r.topologies
if 'logs' in r.get_options():
r.logs = logfiles + r.logs
if r.claims():
return label
def run(self, path, runmatch=dict(), topologyfiles=[], configfiles=[], logfiles=[]):
""" Parses all runs of a certain bucket.
:param path: Basepath of all runs in this bucket.
:param runmatch: For run autodiscovery: dict of regular expressions matching relative paths from bucket root as
keys and named group replacements as values.
"""
self.path = path
self.runmatch = runmatch
for run in self.get_runs():
code = self.get_run_code(os.path.join(path, run), topologyfiles, configfiles, logfiles)
if code == 'cp2k':
self._readers[run] = Reader.CP2KReader(os.path.join(path, run))
elif code == 'namd':
self._readers[run] = Reader.NAMDReader(os.path.join(path, run))
else:
raise NotImplementedError()
self._readers[run].read()
|
ferchault/iago
|
src/iago/Parser.py
|
Python
|
mit
| 3,093
|
[
"CP2K",
"NAMD"
] |
4e15c861e6bfcfeaea60a93e22477786b866ccd2d6a224bc62da3a5fb1222b56
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0083_auto_20150828_2346'),
]
operations = [
migrations.AlterModelOptions(
name='involvementtype',
options={'ordering': ('is_custom', 'index')},
),
migrations.AlterModelOptions(
name='issueprek',
options={'ordering': ('is_custom', 'index')},
),
migrations.AlterModelOptions(
name='issueprimary',
options={'ordering': ('is_custom', 'index')},
),
migrations.AlterModelOptions(
name='participanttype',
options={'ordering': ('is_custom', 'index')},
),
migrations.AlterModelOptions(
name='requestedservice',
options={'ordering': ('is_custom', 'index')},
),
]
|
koebbe/homeworks
|
visit/migrations/0084_auto_20150829_2034.py
|
Python
|
mit
| 954
|
[
"VisIt"
] |
5e3f1df1b6e52d277d6ef26f899b2b9a1ec923a405d39871826dbbe79bdf8b01
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces to assorted Freesurfer utility programs.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import re
from nipype.utils.filemanip import fname_presuffix, split_filename
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import TraitedSpec, File, traits, OutputMultiPath, isdefined, CommandLine, CommandLineInputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
class SampleToSurfaceInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--mov %s",
desc="volume to sample values from")
reference_file = File(exists=True, argstr="--ref %s",
desc="reference volume (default is orig.mgz)")
hemi = traits.Enum("lh", "rh", mandatory=True, argstr="--hemi %s",
desc="target hemisphere")
surface = traits.String(argstr="--surf %s", desc="target surface (default is white)")
reg_xors = ["reg_file", "reg_header", "mni152reg"]
reg_file = File(exists=True, argstr="--reg %s", mandatory=True, xor=reg_xors,
desc="source-to-reference registration file")
reg_header = traits.Bool(argstr="--regheader %s", requires=["subject_id"],
mandatory=True, xor=reg_xors,
desc="register based on header geometry")
mni152reg = traits.Bool(argstr="--mni152reg",
mandatory=True, xor=reg_xors,
desc="source volume is in MNI152 space")
apply_rot = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--rot %.3f %.3f %.3f",
desc="rotation angles (in degrees) to apply to reg matrix")
apply_trans = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--trans %.3f %.3f %.3f",
desc="translation (in mm) to apply to reg matrix")
override_reg_subj = traits.Bool(argstr="--srcsubject %s", requires=["subject_id"],
desc="override the subject in the reg file header")
sampling_method = traits.Enum("point", "max", "average",
mandatory=True, argstr="%s", xor=["projection_stem"],
requires=["sampling_range", "sampling_units"],
desc="how to sample -- at a point or at the max or average over a range")
sampling_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="sampling range - a point or a tuple of (min, max, step)")
sampling_units = traits.Enum("mm", "frac", desc="sampling range type -- either 'mm' or 'frac'")
projection_stem = traits.String(mandatory=True, xor=["sampling_method"],
desc="stem for precomputed linear estimates and volume fractions")
smooth_vol = traits.Float(argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)")
smooth_surf = traits.Float(argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)")
interp_method = traits.Enum("nearest", "trilinear", argstr="--interp %s",
desc="interpolation method")
cortex_mask = traits.Bool(argstr="--cortex", xor=["mask_label"],
desc="mask the target surface with hemi.cortex.label")
mask_label = File(exists=True, argstr="--mask %s", xor=["cortex_mask"],
desc="label file to mask output with")
float2int_method = traits.Enum("round", "tkregister", argstr="--float2int %s",
desc="method to convert reg matrix values (default is round)")
fix_tk_reg = traits.Bool(argstr="--fixtkreg", desc="make reg matrix round-compatible")
subject_id = traits.String(desc="subject id")
target_subject = traits.String(argstr="--trgsubject %s",
desc="sample to surface of different subject than source")
surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"],
desc="use surface registration to target subject")
ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"],
desc="icosahedron order when target_subject is 'ico'")
reshape = traits.Bool(argstr="--reshape", xor=["no_reshape"],
desc="reshape surface vector to fit in non-mgh format")
no_reshape = traits.Bool(argstr="--noreshape", xor=["reshape"],
desc="do not reshape surface vector (default)")
reshape_slices = traits.Int(argstr="--rf %d", desc="number of 'slices' for reshaping")
scale_input = traits.Float(argstr="--scale %.3f",
desc="multiple all intensities by scale factor")
frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)")
out_file = File(argstr="--o %s", genfile=True, desc="surface file to write")
out_type = traits.Enum(filetypes, argstr="--out_type %s", desc="output file type")
hits_file = traits.Either(traits.Bool, File(exists=True), argstr="--srchit %s",
desc="save image with number of hits at each voxel")
hits_type = traits.Enum(filetypes, argstr="--srchit_type", desc="hits file type")
vox_file = traits.Either(traits.Bool, File, argstr="--nvox %s",
desc="text file with the number of voxels intersecting the surface")
class SampleToSurfaceOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="surface file")
hits_file = File(exists=True, desc="image with number of hits at each voxel")
vox_file = File(exists=True,
desc="text file with the number of voxels intersecting the surface")
class SampleToSurface(FSCommand):
"""Sample a volume to the cortical surface using Freesurfer's mri_vol2surf.
You must supply a sampling method, range, and units. You can project
either a given distance (in mm) or a given fraction of the cortical
thickness at that vertex along the surface normal from the target surface,
and then set the value of that vertex to be either the value at that point
or the average or maximum value found along the projection vector.
By default, the surface will be saved as a vector with a length equal to the
number of vertices on the target surface. This is not a problem for Freesurfer
programs, but if you intend to use the file with interfaces to another package,
you must set the ``reshape`` input to True, which will factor the surface vector
into a matrix with dimensions compatible with proper Nifti files.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> sampler = fs.SampleToSurface(hemi="lh")
>>> sampler.inputs.source_file = "cope1.nii.gz"
>>> sampler.inputs.reg_file = "register.dat"
>>> sampler.inputs.sampling_method = "average"
>>> sampler.inputs.sampling_range = 1
>>> sampler.inputs.sampling_units = "frac"
>>> res = sampler.run() # doctest: +SKIP
"""
_cmd = "mri_vol2surf"
input_spec = SampleToSurfaceInputSpec
output_spec = SampleToSurfaceOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
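        # sampling_method, sampling_range and sampling_units are folded into a single --proj* argument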
if name == "sampling_method":
range = self.inputs.sampling_range
units = self.inputs.sampling_units
if units == "mm":
units = "dist"
if isinstance(range, tuple):
range = "%.3f %.3f %.3f" % range
else:
range = "%.3f" % range
method = dict(point="", max="-max", average="-avg")[value]
return "--proj%s%s %s" % (units, method, range)
if name == "reg_header":
return spec.argstr % self.inputs.subject_id
if name == "override_reg_subj":
return spec.argstr % self.inputs.subject_id
if name in ["hits_file", "vox_file"]:
return spec.argstr % self._get_outfilename(name)
return super(SampleToSurface, self)._format_arg(name, spec, value)
def _get_outfilename(self, opt="out_file"):
outfile = getattr(self.inputs, opt)
if not isdefined(outfile) or isinstance(outfile, bool):
if isdefined(self.inputs.out_type):
if opt == "hits_file":
suffix = '_hits.' + self.filemap[self.inputs.out_type]
else:
suffix = '.' + self.filemap[self.inputs.out_type]
elif opt == "hits_file":
suffix = "_hits.mgz"
else:
suffix = '.mgz'
outfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix=suffix,
use_ext=False)
return outfile
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.abspath(self._get_outfilename())
hitsfile = self.inputs.hits_file
if isdefined(hitsfile):
outputs["hits_file"] = hitsfile
if isinstance(hitsfile, bool):
hitsfile = self._get_outfilename("hits_file")
voxfile = self.inputs.vox_file
if isdefined(voxfile):
if isinstance(voxfile, bool):
voxfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix="_vox.txt",
use_ext=False)
outputs["vox_file"] = voxfile
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSmoothInputSpec(FSTraitedSpec):
in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file")
subject_id = traits.String(mandatory=True, argstr="--s %s", desc="subject id of surface file")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on")
fwhm = traits.Float(argstr="--fwhm %.4f", xor=["smooth_iters"],
desc="effective FWHM of the smoothing process")
smooth_iters = traits.Int(argstr="--smooth %d", xor=["fwhm"],
desc="iterations of the smoothing process")
cortex = traits.Bool(True, argstr="--cortex", usedefault=True, desc="only smooth within $hemi.cortex.label")
reshape = traits.Bool(argstr="--reshape",
desc="reshape surface vector to fit in non-mgh format")
out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write")
class SurfaceSmoothOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="smoothed surface file")
class SurfaceSmooth(FSCommand):
"""Smooth a surface image with mri_surf2surf.
    The surface is smoothed by an iterative process of averaging the
value at each vertex with those of its adjacent neighbors. You may supply
either the number of iterations to run or a desired effective FWHM of the
smoothing process. If the latter, the underlying program will calculate
the correct number of iterations internally.
.. seealso::
SmoothTessellation() Interface
For smoothing a tessellated surface (e.g. in gifti or .stl)
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> smoother = fs.SurfaceSmooth()
>>> smoother.inputs.in_file = "lh.cope1.mgz"
>>> smoother.inputs.subject_id = "subj_1"
>>> smoother.inputs.hemi = "lh"
>>> smoother.inputs.fwhm = 5
>>> smoother.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceSmoothInputSpec
output_spec = SurfaceSmoothOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
in_file = self.inputs.in_file
if isdefined(self.inputs.fwhm):
kernel = self.inputs.fwhm
else:
kernel = self.inputs.smooth_iters
outputs["out_file"] = fname_presuffix(in_file,
suffix="_smooth%d" % kernel,
newpath=os.getcwd())
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--sval %s",
xor=['source_annot_file'],
desc="surface file with source values")
source_annot_file = File(exists=True, mandatory=True,
argstr="--sval-annot %s",
xor=['source_file'],
desc="surface annotation file")
source_subject = traits.String(mandatory=True, argstr="--srcsubject %s",
desc="subject id for source surface")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True,
desc="hemisphere to transform")
target_subject = traits.String(mandatory=True, argstr="--trgsubject %s",
desc="subject id of target surface")
target_ico_order = traits.Enum(1, 2, 3, 4, 5, 6, 7,
argstr="--trgicoorder %d",
desc=("order of the icosahedron if "
"target_subject is 'ico'"))
source_type = traits.Enum(filetypes, argstr='--sfmt %s',
requires=['source_file'],
desc="source file format")
target_type = traits.Enum(filetypes, argstr='--tfmt %s',
desc="output format")
reshape = traits.Bool(argstr="--reshape",
desc="reshape output surface to conform with Nifti")
reshape_factor = traits.Int(argstr="--reshape-factor",
desc="number of slices in reshaped image")
out_file = File(argstr="--tval %s", genfile=True,
desc="surface file to write")
class SurfaceTransformOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="transformed surface file")
class SurfaceTransform(FSCommand):
"""Transform a surface file from one subject to another via a spherical registration.
Both the source and target subject must reside in your Subjects Directory,
and they must have been processed with recon-all, unless you are transforming
to one of the icosahedron meshes.
Examples
--------
>>> from nipype.interfaces.freesurfer import SurfaceTransform
>>> sxfm = SurfaceTransform()
>>> sxfm.inputs.source_file = "lh.cope1.nii.gz"
>>> sxfm.inputs.source_subject = "my_subject"
>>> sxfm.inputs.target_subject = "fsaverage"
>>> sxfm.inputs.hemi = "lh"
>>> sxfm.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceTransformInputSpec
output_spec = SurfaceTransformOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
source = self.inputs.source_file
# Some recon-all files don't have a proper extension (e.g. "lh.thickness")
# so we have to account for that here
bad_extensions = [".%s" % e for e in ["area", "mid", "pial", "avg_curv", "curv", "inflated",
"jacobian_white", "orig", "nofix", "smoothwm", "crv",
"sphere", "sulc", "thickness", "volume", "white"]]
use_ext = True
if split_filename(source)[2] in bad_extensions:
source = source + ".stripme"
use_ext = False
ext = ""
if isdefined(self.inputs.target_type):
ext = "." + filemap[self.inputs.target_type]
use_ext = False
outputs["out_file"] = fname_presuffix(source,
suffix=".%s%s" % (self.inputs.target_subject, ext),
newpath=os.getcwd(),
use_ext=use_ext)
else:
outputs["out_file"] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class Surface2VolTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, argstr='--surfval %s',
copyfile=False, mandatory=True,
desc='This is the source of the surface values')
hemi = traits.Str(argstr='--hemi %s', mandatory=True,
desc='hemisphere of data')
transformed_file = File(name_template="%s_asVol.nii", desc='Output volume',
argstr='--outvol %s',
name_source=['source_file'], hash_files=False)
reg_file = File(exists=True, argstr='--volreg %s',
mandatory=True,
desc='tkRAS-to-tkRAS matrix (tkregister2 format)',
xor=['subject_id'])
template_file = File(exists=True, argstr='--template %s',
desc='Output template volume')
mkmask = traits.Bool(desc='make a mask instead of loading surface values',
argstr='--mkmask')
vertexvol_file = File(name_template="%s_asVol_vertex.nii",
desc=('Path name of the vertex output volume, which '
'is the same as output volume except that the '
'value of each voxel is the vertex-id that is '
'mapped to that voxel.'),
argstr='--vtxvol %s', name_source=['source_file'],
hash_files=False)
surf_name = traits.Str(argstr='--surf %s',
desc='surfname (default is white)')
projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction')
subjects_dir = traits.Str(argstr='--sd %s',
desc=('freesurfer subjects directory defaults to '
'$SUBJECTS_DIR'))
subject_id = traits.Str(argstr='--identity %s',desc='subject id',
xor=['reg_file'])
class Surface2VolTransformOutputSpec(TraitedSpec):
transformed_file = File(exists=True,
desc='Path to output file if used normally')
vertexvol_file = File(desc='vertex map volume path id. Optional')
class Surface2VolTransform(FSCommand):
"""Use FreeSurfer mri_surf2vol to apply a transform.
Examples
--------
>>> from nipype.interfaces.freesurfer import Surface2VolTransform
>>> xfm2vol = Surface2VolTransform()
>>> xfm2vol.inputs.source_file = 'lh.cope1.mgz'
>>> xfm2vol.inputs.reg_file = 'register.mat'
>>> xfm2vol.inputs.hemi = 'lh'
>>> xfm2vol.inputs.template_file = 'cope1.nii.gz'
>>> xfm2vol.inputs.subjects_dir = '.'
>>> xfm2vol.cmdline
'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . --template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii'
>>> res = xfm2vol.run()# doctest: +SKIP
"""
_cmd = 'mri_surf2vol'
input_spec = Surface2VolTransformInputSpec
output_spec = Surface2VolTransformOutputSpec
class ApplyMaskInputSpec(FSTraitedSpec):
in_file = File(exists=True, mandatory=True, position=-3, argstr="%s",
desc="input image (will be masked)")
mask_file = File(exists=True, mandatory=True, position=-2, argstr="%s",
desc="image defining mask space")
out_file = File(genfile=True, position=-1, argstr="%s",
desc="final image to write")
xfm_file = File(exists=True, argstr="-xform %s",
desc="LTA-format transformation matrix to align mask with input")
invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation")
xfm_source = File(exists=True, argstr="-lta_src %s", desc="image defining transform source space")
xfm_target = File(exists=True, argstr="-lta_dst %s", desc="image defining transform target space")
use_abs = traits.Bool(argstr="-abs", desc="take absolute value of mask before applying")
mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying")
class ApplyMaskOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="masked image")
class ApplyMask(FSCommand):
"""Use Freesurfer's mri_mask to apply a mask to an image.
The mask file need not be binarized; it can be thresholded above a given
value before application. It can also optionally be transformed into input
space with an LTA matrix.
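    Examples
    --------
    A minimal usage sketch (the file names are illustrative, taken from the
    other examples in this module):
    >>> import nipype.interfaces.freesurfer as fs
    >>> masker = fs.ApplyMask()
    >>> masker.inputs.in_file = "cope1.nii.gz"
    >>> masker.inputs.mask_file = "aseg.mgz"
    >>> res = masker.run() # doctest: +SKIP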
"""
_cmd = "mri_mask"
input_spec = ApplyMaskInputSpec
output_spec = ApplyMaskOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
outputs["out_file"] = fname_presuffix(self.inputs.in_file,
suffix="_masked",
newpath=os.getcwd(),
use_ext=True)
else:
outputs["out_file"] = os.path.abspath(outputs["out_file"])
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSnapshotsInputSpec(FSTraitedSpec):
subject_id = traits.String(position=1, argstr="%s", mandatory=True,
desc="subject to visualize")
hemi = traits.Enum("lh", "rh", position=2, argstr="%s", mandatory=True,
desc="hemisphere to visualize")
surface = traits.String(position=3, argstr="%s", mandatory=True,
desc="surface to visualize")
show_curv = traits.Bool(argstr="-curv", desc="show curvature", xor=["show_gray_curv"])
show_gray_curv = traits.Bool(argstr="-gray", desc="show curvature in gray", xor=["show_curv"])
overlay = File(exists=True, argstr="-overlay %s", desc="load an overlay volume/surface",
requires=["overlay_range"])
reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"]
overlay_reg = traits.File(exists=True, argstr="-overlay-reg %s", xor=reg_xors,
desc="registration matrix file to register overlay to surface")
identity_reg = traits.Bool(argstr="-overlay-reg-identity", xor=reg_xors,
desc="use the identity matrix to register the overlay to the surface")
mni152_reg = traits.Bool(argstr="-mni152reg", xor=reg_xors,
desc="use to display a volume in MNI152 space on the average subject")
overlay_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float),
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="overlay range--either min, (min, max) or (min, mid, max)",
argstr="%s")
overlay_range_offset = traits.Float(argstr="-foffset %.3f",
desc="overlay range will be symettric around offset value")
truncate_overlay = traits.Bool(argstr="-truncphaseflag 1",
desc="truncate the overlay display")
reverse_overlay = traits.Bool(argstr="-revphaseflag 1",
desc="reverse the overlay display")
invert_overlay = traits.Bool(argstr="-invphaseflag 1",
desc="invert the overlay display")
demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay")
annot_file = File(exists=True, argstr="-annotation %s", xor=["annot_name"],
desc="path to annotation file to display")
annot_name = traits.String(argstr="-annotation %s", xor=["annot_file"],
desc="name of annotation to display (must be in $subject/label directory")
label_file = File(exists=True, argstr="-label %s", xor=["label_name"],
desc="path to label file to display")
label_name = traits.String(argstr="-label %s", xor=["label_file"],
desc="name of label to display (must be in $subject/label directory")
colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file")
label_under = traits.Bool(argstr="-labels-under", desc="draw label/annotation under overlay")
label_outline = traits.Bool(argstr="-label-outline", desc="draw label/annotation as outline")
patch_file = File(exists=True, argstr="-patch %s", desc="load a patch")
orig_suffix = traits.String(argstr="-orig %s", desc="set the orig surface suffix string")
sphere_suffix = traits.String(argstr="-sphere %s", desc="set the sphere.reg suffix string")
show_color_scale = traits.Bool(argstr="-colscalebarflag 1",
desc="display the color scale bar")
show_color_text = traits.Bool(argstr="-colscaletext 1",
desc="display text in the color scale bar")
six_images = traits.Bool(desc="also take anterior and posterior snapshots")
screenshot_stem = traits.String(desc="stem to use for screenshot file names")
stem_template_args = traits.List(traits.String, requires=["screenshot_stem"],
desc="input names to use as arguments for a string-formated stem template")
tcl_script = File(exists=True, argstr="%s", genfile=True,
desc="override default screenshot script")
class SurfaceSnapshotsOutputSpec(TraitedSpec):
snapshots = OutputMultiPath(File(exists=True),
desc="tiff images of the surface from different perspectives")
class SurfaceSnapshots(FSCommand):
"""Use Tksurfer to save pictures of the cortical surface.
By default, this takes snapshots of the lateral, medial, ventral,
and dorsal surfaces. See the ``six_images`` option to add the
anterior and posterior surfaces.
You may also supply your own tcl script (see the Freesurfer wiki for
information on scripting tksurfer). The screenshot stem is set as the
environment variable "_SNAPSHOT_STEM", which you can use in your
own scripts.
    Note that this interface will not run if you do not have graphics
enabled on your system.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial")
>>> shots.inputs.overlay = "zstat1.nii.gz"
>>> shots.inputs.overlay_range = (2.3, 6)
>>> shots.inputs.overlay_reg = "register.dat"
>>> res = shots.run() # doctest: +SKIP
"""
_cmd = "tksurfer"
input_spec = SurfaceSnapshotsInputSpec
output_spec = SurfaceSnapshotsOutputSpec
def _format_arg(self, name, spec, value):
if name == "tcl_script":
if not isdefined(value):
return "-tcl snapshots.tcl"
else:
return "-tcl %s" % value
elif name == "overlay_range":
if isinstance(value, float):
return "-fthresh %.3f" % value
else:
if len(value) == 2:
return "-fminmax %.3f %.3f" % value
else:
return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], value[2], value[1])
elif name == "annot_name" and isdefined(value):
# Matching annot by name needs to strip the leading hemi and trailing
# extension strings
if value.endswith(".annot"):
value = value[:-6]
if re.match("%s[\.\-_]" % self.inputs.hemi, value[:3]):
value = value[3:]
return "-annotation %s" % value
return super(SurfaceSnapshots, self)._format_arg(name, spec, value)
def _run_interface(self, runtime):
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (
self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
# Check if the DISPLAY variable is set -- should avoid crashes (might not?)
if not "DISPLAY" in os.environ:
raise RuntimeError("Graphics are not enabled -- cannot run tksurfer")
runtime.environ["_SNAPSHOT_STEM"] = stem
self._write_tcl_script()
runtime = super(SurfaceSnapshots, self)._run_interface(runtime)
# If a display window can't be opened, this will crash on
# aggregate_outputs. Let's try to parse stderr and raise a
# better exception here if that happened.
errors = ["surfer: failed, no suitable display found",
"Fatal Error in tksurfer.bin: could not open display"]
for err in errors:
if err in runtime.stderr:
self.raise_exception(runtime)
# Tksurfer always (or at least always when you run a tcl script)
# exits with a nonzero returncode. We have to force it to 0 here.
runtime.returncode = 0
return runtime
def _write_tcl_script(self):
fid = open("snapshots.tcl", "w")
script = ["save_tiff $env(_SNAPSHOT_STEM)-lat.tif",
"make_lateral_view",
"rotate_brain_y 180",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-med.tif",
"make_lateral_view",
"rotate_brain_x 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ven.tif",
"make_lateral_view",
"rotate_brain_x -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-dor.tif"]
if isdefined(self.inputs.six_images) and self.inputs.six_images:
script.extend(["make_lateral_view",
"rotate_brain_y 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-pos.tif",
"make_lateral_view",
"rotate_brain_y -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ant.tif"])
script.append("exit")
fid.write("\n".join(script))
fid.close()
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"]
if self.inputs.six_images:
snapshots.extend(["%s-pos.tif", "%s-ant.tif"])
snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots]
outputs["snapshots"] = snapshots
return outputs
def _gen_filename(self, name):
if name == "tcl_script":
return "snapshots.tcl"
return None
class ImageInfoInputSpec(FSTraitedSpec):
in_file = File(exists=True, position=1, argstr="%s", desc="image to query")
class ImageInfoOutputSpec(TraitedSpec):
info = traits.Any(desc="output of mri_info")
out_file = File(exists=True, desc="text file with image information")
data_type = traits.String(desc="image data type")
file_format = traits.String(desc="file format")
TE = traits.String(desc="echo time (msec)")
TR = traits.String(desc="repetition time(msec)")
TI = traits.String(desc="inversion time (msec)")
dimensions = traits.Tuple(desc="image dimensions (voxels)")
vox_sizes = traits.Tuple(desc="voxel sizes (mm)")
orientation = traits.String(desc="image orientation")
ph_enc_dir = traits.String(desc="phase encode direction")
class ImageInfo(FSCommand):
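    """Query image metadata (data type, dimensions, voxel sizes, TR/TE/TI, ...) with Freesurfer's mri_info."""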
_cmd = "mri_info"
input_spec = ImageInfoInputSpec
output_spec = ImageInfoOutputSpec
def info_regexp(self, info, field, delim="\n"):
m = re.search("%s\s*:\s+(.+?)%s" % (field, delim), info)
if m:
return m.group(1)
else:
return None
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
info = runtime.stdout
outputs.info = info
# Pulse sequence parameters
for field in ["TE", "TR", "TI"]:
fieldval = self.info_regexp(info, field, ", ")
if fieldval.endswith(" msec"):
fieldval = fieldval[:-5]
setattr(outputs, field, fieldval)
# Voxel info
vox = self.info_regexp(info, "voxel sizes")
vox = tuple(vox.split(", "))
outputs.vox_sizes = vox
dim = self.info_regexp(info, "dimensions")
dim = tuple([int(d) for d in dim.split(" x ")])
outputs.dimensions = dim
outputs.orientation = self.info_regexp(info, "Orientation")
outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir")
# File format and datatype are both keyed by "type"
ftype, dtype = re.findall("%s\s*:\s+(.+?)\n" % "type", info)
outputs.file_format = ftype
outputs.data_type = dtype
return outputs
class MRIsConvertInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
annot_file = File(exists=True, argstr="--annot %s",
desc="input is annotation or gifti label data")
parcstats_file = File(exists=True, argstr="--parcstats %s",
desc="infile is name of text file containing label/val pairs")
label_file = File(exists=True, argstr="--label %s",
desc="infile is .label file, label is name of this label")
scalarcurv_file = File(exists=True, argstr="-c %s",
desc="input is scalar curv overlay file (must still specify surface)")
functional_file = File(exists=True, argstr="-f %s",
desc="input is functional time-series or other multi-frame data (must specify surface)")
labelstats_outfile = File(exists=False, argstr="--labelstats %s",
desc="outfile is name of gifti file to which label stats will be written")
patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface")
rescale = traits.Bool(argstr="-r", desc="rescale vertex xyz so total area is same as group average")
normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data")
xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file")
vertex = traits.Bool(argstr="-v", desc="Writes out neighbors of a vertex in each row")
scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale")
dataarray_num = traits.Int(argstr="--da_num %d", desc="if input is gifti, 'num' specifies which data array to use")
talairachxfm_subjid = traits.String(argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz")
origname = traits.String(argstr="-o %s", desc="read orig positions")
in_file = File(exists=True, mandatory=True, position=-2, argstr='%s', desc='File to read/convert')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
#Not really sure why the ./ is necessary but the module fails without it
out_datatype = traits.Enum("ico", "tri", "stl", "vtk", "gii", "mgh", "mgz", mandatory=True,
desc="These file formats are supported: ASCII: .asc" \
"ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz")
class MRIsConvertOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
converted = File(exists=True, desc='converted output surface')
class MRIsConvert(FSCommand):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mris = fs.MRIsConvert()
>>> mris.inputs.in_file = 'lh.pial'
>>> mris.inputs.out_datatype = 'gii'
>>> mris.run() # doctest: +SKIP
"""
_cmd = 'mris_convert'
input_spec = MRIsConvertInputSpec
output_spec = MRIsConvertOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["converted"] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.annot_file):
_, name, ext = split_filename(self.inputs.annot_file)
elif isdefined(self.inputs.parcstats_file):
_, name, ext = split_filename(self.inputs.parcstats_file)
elif isdefined(self.inputs.label_file):
_, name, ext = split_filename(self.inputs.label_file)
elif isdefined(self.inputs.scalarcurv_file):
_, name, ext = split_filename(self.inputs.scalarcurv_file)
elif isdefined(self.inputs.functional_file):
_, name, ext = split_filename(self.inputs.functional_file)
elif isdefined(self.inputs.in_file):
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + "_converted." + self.inputs.out_datatype
class MRITessellateInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
    in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='Input volume to tessellate voxels from.')
label_value = traits.Int(position=-2, argstr='%d', mandatory=True,
                             desc='Label value to tessellate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
tesselate_all_voxels = traits.Bool(argstr='-a', desc='Tessellate the surface of all voxels with different labels')
use_real_RAS_coordinates = traits.Bool(argstr='-n', desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0')
class MRITessellateOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRITessellate(FSCommand):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> tess = fs.MRITessellate()
>>> tess.inputs.in_file = 'aseg.mgz'
>>> tess.inputs.label_value = 17
>>> tess.inputs.out_file = 'lh.hippocampus'
>>> tess.run() # doctest: +SKIP
"""
_cmd = 'mri_tessellate'
input_spec = MRITessellateInputSpec
output_spec = MRITessellateOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return self.inputs.out_file
else:
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + '_' + str(self.inputs.label_value)
class MRIMarchingCubesInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
    in_file = File(exists=True, mandatory=True, position=1, argstr='%s', desc='Input volume to tessellate voxels from.')
label_value = traits.Int(position=2, argstr='%d', mandatory=True,
                             desc='Label value to tessellate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
connectivity_value = traits.Int(1, position=-1, argstr='%d', usedefault=True,
desc='Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)')
out_file = File(argstr='./%s', position=-2, genfile=True, desc='output filename or True to generate one')
class MRIMarchingCubesOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRIMarchingCubes(FSCommand):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mc = fs.MRIMarchingCubes()
>>> mc.inputs.in_file = 'aseg.mgz'
>>> mc.inputs.label_value = 17
>>> mc.inputs.out_file = 'lh.hippocampus'
>>> mc.run() # doctest: +SKIP
"""
_cmd = 'mri_mc'
input_spec = MRIMarchingCubesInputSpec
output_spec = MRIMarchingCubesOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + ext + '_' + str(self.inputs.label_value))
class SmoothTessellationInputSpec(FSTraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
    in_file = File(exists=True, mandatory=True, argstr='%s', position=1, desc='Input surface file to smooth.')
curvature_averaging_iterations = traits.Int(10, usedefault=True, argstr='-a %d', position=-1, desc='Number of curvature averaging iterations (default=10)')
smoothing_iterations = traits.Int(10, usedefault=True, argstr='-n %d', position=-2, desc='Number of smoothing iterations (default=10)')
snapshot_writing_iterations = traits.Int(argstr='-w %d', desc='Write snapshot every "n" iterations')
use_gaussian_curvature_smoothing = traits.Bool(argstr='-g', position=3, desc='Use Gaussian curvature smoothing')
gaussian_curvature_norm_steps = traits.Int(argstr='%d ', position=4, desc='Use Gaussian curvature smoothing')
gaussian_curvature_smoothing_steps = traits.Int(argstr='%d', position=5, desc='Use Gaussian curvature smoothing')
disable_estimates = traits.Bool(argstr='-nw', desc='Disables the writing of curvature and area estimates')
normalize_area = traits.Bool(argstr='-area', desc='Normalizes the area after smoothing')
use_momentum = traits.Bool(argstr='-m', desc='Uses momentum')
out_file = File(argstr='%s', position=2, genfile=True, desc='output filename or True to generate one')
out_curvature_file = File(argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")')
out_area_file = File(argstr='-b %s', desc='Write area to ?h.areaname (default "area")')
class SmoothTessellationOutputSpec(TraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
surface = File(exists=True, desc='Smoothed surface file ')
class SmoothTessellation(FSCommand):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
.. seealso::
SurfaceSmooth() Interface
For smoothing a scalar field along a surface manifold
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> smooth = fs.SmoothTessellation()
>>> smooth.inputs.in_file = 'lh.hippocampus.stl'
>>> smooth.run() # doctest: +SKIP
"""
_cmd = 'mris_smooth'
input_spec = SmoothTessellationInputSpec
output_spec = SmoothTessellationOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + '_smoothed' + ext)
def _run_interface(self, runtime):
# The returncode is meaningless in BET. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(SmoothTessellation, self)._run_interface(runtime)
if "failed" in runtime.stderr:
self.raise_exception(runtime)
return runtime
class MakeAverageSubjectInputSpec(FSTraitedSpec):
subjects_ids = traits.List(traits.Str(), argstr='--subjects %s',
desc='freesurfer subjects ids to average',
mandatory=True, sep=' ')
out_name = File('average', argstr='--out %s',
desc='name for the average subject', usedefault=True)
class MakeAverageSubjectOutputSpec(TraitedSpec):
average_subject_name = traits.Str(desc='Output registration file')
class MakeAverageSubject(FSCommand):
"""Make an average freesurfer subject
Examples
--------
>>> from nipype.interfaces.freesurfer import MakeAverageSubject
>>> avg = MakeAverageSubject(subjects_ids=['s1', 's2'])
>>> avg.cmdline
'make_average_subject --out average --subjects s1 s2'
"""
_cmd = 'make_average_subject'
input_spec = MakeAverageSubjectInputSpec
output_spec = MakeAverageSubjectOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['average_subject_name'] = self.inputs.out_name
return outputs
class ExtractMainComponentInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='%s', position=1,
desc='input surface file')
out_file = File(name_template='%s.maincmp', name_source='in_file',
argstr='%s', position=2,
desc='surface containing main component')
class ExtractMainComponentOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='surface containing main component')
class ExtractMainComponent(CommandLine):
"""Extract the main component of a tesselated surface
Examples
--------
>>> from nipype.interfaces.freesurfer import ExtractMainComponent
>>> mcmp = ExtractMainComponent(in_file='lh.pial')
>>> mcmp.cmdline
'mris_extract_main_component lh.pial lh.maincmp'
"""
_cmd='mris_extract_main_component'
input_spec=ExtractMainComponentInputSpec
output_spec=ExtractMainComponentOutputSpec
|
mick-d/nipype_source
|
nipype/interfaces/freesurfer/utils.py
|
Python
|
bsd-3-clause
| 49,356
|
[
"Gaussian",
"VTK"
] |
54fdd4fb003cc8369075b35c4f4d6520e0d2fd633b681b46497f51cd523ebe09
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# ICConvoluted.py
#
# Defines the IPeakFunction IkedaCarpenterConvoluted
# which is the standard Ikeda-Carpenter (IC) function convoluted with
# a square wave and a Gaussian.
#
#
from __future__ import (absolute_import, division, print_function)
import numpy as np
from mantid.api import IFunction1D, FunctionFactory
class IkedaCarpenterConvoluted(IFunction1D):
def init(self):
self.declareParameter("A") #Alpha
self.declareParameter("B") #Beta
self.declareParameter("R") #R - ratio of fast to slow neutrons
self.declareParameter("T0") #T0 - time offset
self.declareParameter("Scale") #amplitude
self.declareParameter("HatWidth") #width of square wave
self.declareParameter("KConv") #KConv for Gaussian
#pass a penalty value to override Mantid's default constraint penalty factor; penalty=None keeps the default
def setPenalizedConstraints(self, A0=None, B0=None, R0=None, T00=None, Scale0=None, HatWidth0=None, KConv0=None, penalty=None):
if A0 is not None:
self.addConstraints("{:4.4e} < A < {:4.4e}".format(A0[0], A0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("A", penalty)
if B0 is not None:
self.addConstraints("{:4.4e} < B < {:4.4e}".format(B0[0], B0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("B", penalty)
if R0 is not None:
self.addConstraints("{:4.4e} < R < {:4.4e}".format(R0[0], R0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("R", penalty)
if T00 is not None:
self.addConstraints("{:4.4e} < T0 < {:4.4e}".format(T00[0], T00[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("T0", penalty)
if Scale0 is not None:
self.addConstraints("{:4.4e} < Scale < {:4.4e}".format(Scale0[0], Scale0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("Scale", penalty)
if HatWidth0 is not None:
self.addConstraints("{:4.4e} < HatWidth < {:4.4e}".format(HatWidth0[0], HatWidth0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("HatWidth", penalty)
if KConv0 is not None:
self.addConstraints("{:4.4e} < KConv < {:4.4e}".format(KConv0[0], KConv0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("KConv", penalty)
def function1D(self, t):
A = self.getParamValue(0)
B = self.getParamValue(1)
R = self.getParamValue(2)
T0 = self.getParamValue(3)
Scale = self.getParamValue(4)
HatWidth = self.getParamValue(5)
KConv = self.getParamValue(6)
# A/2 Scale factor has been removed to make A and Scale independent
f_int = Scale*((1-R)*np.power((A*(t-T0)),2)*
np.exp(-A*(t-T0))+2*R*A**2*B/np.power((A-B),3) *
(np.exp(-B*(t-T0))-np.exp(-A*(t-T0))*(1+(A-B)*(t-T0)+0.5*np.power((A-B),2)*np.power((t-T0),2))))
f_int[t<T0] = 0
mid_point_hat = len(f_int)//2
gc_x = np.array(range(len(f_int))).astype(float)
ppd = 0.0*gc_x
lowIDX = int(np.floor(np.max([mid_point_hat-np.abs(HatWidth),0])))
highIDX = int(np.ceil(np.min([mid_point_hat+np.abs(HatWidth),len(gc_x)])))
ppd[lowIDX:highIDX] = 1.0
ppd = ppd/sum(ppd)
gc_x = np.array(range(len(f_int))).astype(float)
gc_x = 2*(gc_x-np.min(gc_x))/(np.max(gc_x)-np.min(gc_x))-1
gc_f = np.exp(-KConv*np.power(gc_x,2))
gc_f = gc_f/np.sum(gc_f)
npad = len(f_int) - 1
first = npad - npad//2
f_int = np.convolve(f_int,ppd,'full')[first:first+len(f_int)]
f_int = np.convolve(f_int,gc_f,'full')[first:first+len(f_int)]
return f_int
# Evaluate the function for a different set of parameters (trialc)
def function1DDiffParams(self, xvals, trialc):
# First, grab the original parameters and set to trialc
c = np.zeros(self.numParams())
for i in range(self.numParams()):
c[i] = self.getParamValue(i)
self.setParameter(i, trialc[i])
# Get the trial values
f_trial = self.function1D(xvals)
# Now return to the original parameters
for i in range(self.numParams()):
self.setParameter(i, c[i])
return f_trial
# Construct the Jacobian (df) for the function
def functionDeriv1D(self, xvals, jacobian, eps=1.e-3):
f_int = self.function1D(xvals)
#Fetch parameters into array c
c = np.zeros(self.numParams())
for i in range(self.numParams()):
c[i] = self.getParamValue(i)
nc = np.prod(np.shape(c))
for k in range(nc):
dc = np.zeros(nc)
dc[k] = max(eps,eps*c[k])
f_new = self.function1DDiffParams(xvals,c+dc)
for i,dF in enumerate(f_new-f_int):
jacobian.set(i,k,dF/dc[k])
FunctionFactory.subscribe(IkedaCarpenterConvoluted)
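# Stand-alone numpy sketch of the smoothing step in function1D above: the
# profile is convolved with a normalized square wave, then with a normalized
# Gaussian, and each 'full' convolution is trimmed back to the original
# length. Array sizes and the HatWidth/KConv values here are assumptions for
# illustration only.
#
#   import numpy as np
#   f = np.exp(-np.linspace(0, 5, 201))        # stand-in for the IC profile
#   mid = len(f) // 2
#   hat = np.zeros_like(f)                     # square wave (HatWidth ~ 3)
#   hat[mid - 3:mid + 3] = 1.0
#   hat /= hat.sum()
#   x = 2 * np.arange(len(f)) / (len(f) - 1.0) - 1
#   gauss = np.exp(-100.0 * x ** 2)            # KConv = 100 assumed
#   gauss /= gauss.sum()
#   npad = len(f) - 1
#   first = npad - npad // 2
#   out = np.convolve(f, hat, 'full')[first:first + len(f)]
#   out = np.convolve(out, gauss, 'full')[first:first + len(f)]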
|
mganeva/mantid
|
Framework/PythonInterface/plugins/functions/ICConvoluted.py
|
Python
|
gpl-3.0
| 5,350
|
[
"Gaussian"
] |
fdbab072a9adede867d189fe4a01e6dcfbb556b93d7aab197ee0d6579a409be7
|
#!/usr/bin/env python
from __future__ import with_statement
import threading, time, sys, os, tempfile
import Queue
import motmot.utils.config
import traceback
import warnings
import socket
import pkg_resources # from setuptools
import wx
import motmot.wxvalidatedtext.wxvalidatedtext as wxvt
from optparse import OptionParser
import motmot.cam_iface.choose as cam_iface_choose
cam_iface = None
import numpy as nx
import numpy as np
import motmot.FlyMovieFormat.FlyMovieFormat as FlyMovieFormat
from wx import xrc
from .plugin_manager import load_plugins
from .utils import SharedValue
from .version import __version__
if int(os.environ.get('FVIEW_NO_OPENGL','0')):
import motmot.wxvideo.wxvideo as video_module
have_opengl = False
else:
import motmot.wxglvideo.simple_overlay as video_module
have_opengl = True
# trigger extraction
RESFILE = pkg_resources.resource_filename(__name__,"fview.xrc")
RESDIR = os.path.split(RESFILE)[0]
RES = xrc.EmptyXmlResource()
RES.LoadFromString(open(RESFILE).read())
def my_loadpanel(parent,panel_name):
orig_dir = os.path.abspath(os.curdir)
if os.path.exists(RESDIR): # sometimes RESDIR can be "" (GH-1)
os.chdir(RESDIR)
try:
result = RES.LoadPanel(parent,panel_name)
finally:
os.chdir(orig_dir)
return result
########
# persistent configuration data ( implementation in motmot.utils.config )
def get_rc_params():
defaultParams = {
'backend' : 'mega',
'wrapper' : 'ctypes',
'flipLR' : True,
'rotate180' : False,
'view_interval' : 1,
'movie_fname_prefix' : 'movie',
}
fviewrc_fname = motmot.utils.config.rc_fname(filename='fviewrc',
dirname='.fview')
rc_params = motmot.utils.config.get_rc_params(fviewrc_fname,
defaultParams)
return rc_params
def save_rc_params():
save_fname = motmot.utils.config.rc_fname(must_already_exist=False,
filename='fviewrc',
dirname='.fview')
try:
motmot.utils.config.save_rc_params(save_fname,rc_params)
except IOError, err:
warnings.warn('ERROR saving config file: %s'%err)
rc_params = get_rc_params()
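# Usage pattern for the persistent settings (illustrative key shown): mutate
# rc_params and call save_rc_params(), which writes the file managed by
# motmot.utils.config (typically under the user's home directory).
#
#   rc_params['view_interval'] = 4   # any key from defaultParams above
#   save_rc_params()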
########
# use to trigger GUI thread action from grab thread
CamPropertyDataReadyEvent = wx.NewEventType()
# use to trigger GUI thread action from grab thread
CamROIDataReadyEvent = wx.NewEventType()
# use to trigger GUI thread action from grab thread
ImageReadyEvent = wx.NewEventType()
# use to trigger GUI thread action from grab thread
CamFramerateReadyEvent = wx.NewEventType()
# use to trigger GUI thread action from grab thread
FViewShutdownEvent = wx.NewEventType()
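# Pattern sketch (names below are illustrative, not part of fview): the grab
# thread never touches wx widgets directly; it posts a CommandEvent of one of
# the custom types above, and the GUI thread, having bound a handler via
# Connect(), runs that handler inside the wx main loop.
#
#   def _notify_gui(wxapp, event_type):
#       evt = wx.CommandEvent(event_type)
#       evt.SetEventObject(wxapp)
#       wx.PostEvent(wxapp, evt)   # safe to call from any thread
#
#   # in the GUI thread (see App.OnInit below):
#   #   self.Connect(-1, -1, ImageReadyEvent, self.OnUpdateCameraView)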
USE_DEBUG = bool(int(os.environ.get('FVIEW_DEBUG','0')))
def DEBUG():
print 'line %d thread %s'%(sys._getframe().f_back.f_lineno,
threading.currentThread())
class WindowsTimeHack:
def __init__(self):
tmp = time.clock()
self.t1 = time.time()
def time(self):
return time.clock() + self.t1
if sys.platform == 'win32':
thack = WindowsTimeHack()
time_func = thack.time
else:
time_func = time.time
in_fnt = Queue.Queue()
def grab_func(wxapp,
image_update_lock,
cam,
cam_id,
max_priority_enabled,
quit_now,
thread_done,
fps_value,
framerate_value,
num_buffers_value,
#AR app_ready,
plugins,
cam_prop_get_queue,
cam_roi_get_queue,
framerate_get_queue,
cam_cmd_queue,
fview_ext_trig_plugin,
):
# transfer data from camera
global in_fnt
def showerr(str):
print str
max_priority_enabled.clear()
if sys.platform.startswith('linux'):
# Not all POSIX platforms support sched_getparam(). See
# http://lists.apple.com/archives/Unix-porting/2005/Jul/msg00027.html
import posix_sched
try:
max_priority = posix_sched.get_priority_max( posix_sched.FIFO )
sched_params = posix_sched.SchedParam(max_priority)
posix_sched.setscheduler(0, posix_sched.FIFO, sched_params)
max_priority_enabled.set()
except Exception, x:
pass # not really a problem, just not in maximum priority mode
n_frames = 0
good_n_frames = 0
start = None
#AR app_ready.wait() # delay before starting camera to let wx start
if hasattr(cam,'set_thread_owner'):
cam.set_thread_owner()
max_width = cam.get_max_width()
max_height = cam.get_max_height()
cam.start_camera()
if 1:
# This reduces likelihood of frame corruption on libdc1394
# 2.1.0 with Firefly MV USB cameras. Tested on Ubuntu 8.04
# amd64 with libusb-1.0.1.
time.sleep(0.1)
# semi-hack to maximize hardware ROI on start
try: cam.set_frame_roi(0,0,max_width,max_height)
except cam_iface.CamIFaceError, err:
print ('fview warning: ignoring error on set_frame_roi() '
'while trying to maximize ROI at start')
l,b,w,h = cam.get_frame_roi()
xyoffset = l,b
# find memory allocator from plugins (e.g. FastImage)
buf_allocator = None
for plugin in plugins:
if hasattr(plugin,'get_buffer_allocator'):
buf_allocator = plugin.get_buffer_allocator(cam_id)
if buf_allocator is not None:
break # use first allocator
send_framerate = False
timestamp_source = 'camera driver'
try:
while not quit_now.isSet():
try:
if buf_allocator is None:
cam_iface_buf = cam.grab_next_frame_blocking()
else:
# shorthand
func = cam.grab_next_frame_into_alloced_buf_blocking
cam_iface_buf = func(buf_allocator)
del func
this_frame_has_good_data = True
except cam_iface.BuffersOverflowed:
showerr('WARNING: buffers overflowed, frame numbers off')
continue
except (cam_iface.FrameSystemCallInterruption, cam_iface.NoFrameReturned):
# re-try
continue
except cam_iface.FrameDataMissing:
#showerr('WARNING: frame data missing')
this_frame_has_good_data = False
except cam_iface.FrameDataCorrupt:
#showerr('WARNING: frame data corrupt')
this_frame_has_good_data = False
if USE_DEBUG:
sys.stdout.write('.')
sys.stdout.flush()
try:
camera_driver_timestamp=cam.get_last_timestamp()
except cam_iface.CamIFaceError, err:
# XXX this is a hack to deal with trouble getting timestamp
camera_driver_timestamp = -time.time()
fno = cam.get_last_framenumber()
now = time_func()
if start is None:
start = now
n_frames += 1
if not this_frame_has_good_data:
continue
plugin_points = []
plugin_linesegs = []
if fview_ext_trig_plugin is not None:
points,linesegs = fview_ext_trig_plugin.process_frame(
cam_id, cam_iface_buf, xyoffset, camera_driver_timestamp, fno )
plugin_points.extend( points )
plugin_linesegs.extend( linesegs )
if timestamp_source == 'camera driver':
use_timestamp = camera_driver_timestamp # from camera driver
elif timestamp_source == 'host clock':
use_timestamp = now # from computer's own clock
elif timestamp_source == 'CamTrig':
use_timestamp = fview_ext_trig_plugin.get_last_trigger_timestamp(cam_id)
else:
raise ValueError('unknown camera timestamp source')
good_n_frames += 1
for plugin in plugins:
if plugin is fview_ext_trig_plugin:
# already did this plugin above
continue
points,linesegs = plugin.process_frame(
cam_id, cam_iface_buf, xyoffset, use_timestamp, fno )
plugin_points.extend( points )
plugin_linesegs.extend( linesegs )
#buf = nx.asarray(cam_iface_buf)
image_update_lock.acquire()
wxapp.last_image_fullsize = (max_width,max_height)
wxapp.last_image = cam_iface_buf # frame
wxapp.last_offset = xyoffset
wxapp.new_image = True
wxapp.plugin_points = plugin_points
wxapp.plugin_linesegs = plugin_linesegs
image_update_lock.release()
event = wx.CommandEvent(ImageReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
if in_fnt.qsize() < 1000:
# save a copy of the buffer
in_fnt.put( (cam_iface_buf, xyoffset, use_timestamp, fno) )
else:
showerr('ERROR: not appending new frame to queue, because '
'it already has 1000 frames!')
if now - start > 1.0:
fps = n_frames/(now-start)
good_fps = good_n_frames/(now-start)
start = now
n_frames = 0
good_n_frames = 0
fps_value.set((fps,good_fps))
if framerate_value.is_new_value_waiting():
fr = framerate_value.get_nowait()
try:
cam.set_framerate(fr)
except Exception, err:
showerr('ignoring error setting framerate: '+str(err))
except:
showerr('ignoring error setting framerate')
send_framerate = True
if num_buffers_value.is_new_value_waiting():
nb = num_buffers_value.get_nowait()
try:
cam.set_num_framebuffers(nb)
except Exception, err:
showerr('ignoring error setting number of framebuffers: '
'%s'%err)
except:
showerr('ignoring error setting number of framebuffers')
send_framerate = True
try:
while 1:
cmd,cmd_payload = cam_cmd_queue.get_nowait()
if cmd == 'property change':
prop_num,new_value,set_auto = cmd_payload
try:
cam.set_camera_property(
prop_num, new_value, set_auto )
except Exception, err:
showerr('ignoring error setting property: %s'%err)
value,auto = cam.get_camera_property( prop_num )
cam_prop_get_queue.put( (prop_num, value, auto) )
event = wx.CommandEvent(CamPropertyDataReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
elif cmd == 'property query':
num_props = cam.get_num_camera_properties()
for prop_num in range(num_props):
value,auto = cam.get_camera_property( prop_num )
cam_prop_get_queue.put( (prop_num, value, auto) )
event = wx.CommandEvent(CamPropertyDataReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
elif cmd == 'ROI query':
l,b,w,h = cam.get_frame_roi()
r = l+w
t = b+h
cam_roi_get_queue.put( (l,b,r,t) )
event = wx.CommandEvent(CamROIDataReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
elif cmd == 'ROI set':
l,b,r,t = cmd_payload
w = r-l
h = t-b
try:
# if camera needs to be stopped for these
# operations, do it in the driver (not all
# cameras must be stopped).
cam.set_frame_roi(l,b,w,h)
xyoffset = l,b
except cam_iface.CamIFaceError, x:
# error setting frame size/offset
sys.stderr.write('fview ignoring error when '
'attempting to set ROI: %s\n'%(x,))
else:
# send ROI back out to GUI thread if no error
cam_roi_get_queue.put( (l,b,r,t) )
event = wx.CommandEvent(CamROIDataReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
elif cmd=='TriggerMode Set':
cam.set_trigger_mode_number(cmd_payload)
send_framerate = True
elif cmd=='framerate query':
send_framerate = True
elif cmd=='timestamp source':
timestamp_source = cmd_payload
else:
raise ValueError('unknown command: %s'%cmd)
except Queue.Empty:
pass
if send_framerate:
# framerate
current_framerate = cam.get_framerate()
trigger_mode = cam.get_trigger_mode_number()
num_buffers = cam.get_num_framebuffers()
framerate_get_queue.put(
(current_framerate,trigger_mode,num_buffers) )
event = wx.CommandEvent(CamFramerateReadyEvent)
event.SetEventObject(wxapp)
wx.PostEvent(wxapp, event)
# do num_buffers
send_framerate = False
finally:
try:
cam.close()
except Exception,err:
print 'ERROR trying to close camera:',err
thread_done.set()
def save_func(wxapp,
save_info_lock,
quit_now,
):
"""save function for running in separate thread
It's important to save data in a thread separate from the grab
thread because we don't want to skip frames and it's important to
be outside the GUI mainloop, because we don't want to block user
input.
"""
# transfer data from camera
global in_fnt
while not quit_now.isSet():
try:
while 1: # process each frame
frame, xyoffset, timestamp, fno = in_fnt.get(0)
if wxapp.save_fno:
save_temporal_value = float(fno)
else:
save_temporal_value = timestamp
# lock should be held to use wxapp.save_images and
# wxapp.fly_movie
with save_info_lock:
nth_frame = wxapp.save_images
if nth_frame:
if fno%nth_frame==0:
wxapp.fly_movie.add_frame(frame,save_temporal_value)
except Queue.Empty:
pass
time.sleep(0.1) # give other threads plenty of time
class CameraParameterHelper:
def __init__(self, cam, wxparent, wxsizer, prop_num, fview_app ):
"""
This __init__ method gets called while cam has not been passed
off to grab thread, and thus can directly manipulate cam.
"""
self.prop_num = prop_num
del prop_num
self.fview_app = fview_app
del fview_app
self.present = True
self.props = cam.get_camera_property_info(self.prop_num)
if not self.props['is_present']:
self.present = False
return
elif ('available' in self.props and # added in libcamiface 0.5.7, motmot.camiface 0.4.8
not self.props['available']):
self.present = False
return
elif not self.props['has_manual_mode']:
# Temperature on Dragonfly2 doesn't like to be read out,
# even though it reports being readout_capable. Don't
# build a control for it.
# (TODO: self.props['original_value'] could be used to set
# the value in the GUI and get_camera_property could not
# be called.)
self.present = False
return
self.current_value, self.current_is_auto = cam.get_camera_property(
self.prop_num)
label = self.props['name']+':'
if self.props['is_scaled_quantity']:
label += ' (%s)'%(self.props['scaled_unit_name'],)
statictext = wx.StaticText(wxparent,label=label)
wxsizer.Add(statictext,flag=wx.ALIGN_CENTRE_VERTICAL)
self.slider = wx.Slider(wxparent,
style=wx.SL_HORIZONTAL)
minv = self.props['min_value']
maxv = self.props['max_value']
if minv == maxv:
self.slider.SetRange(minv-1,maxv+1)
self.slider.Enable(False)
else:
self.slider.SetRange(minv,maxv)
wx.EVT_COMMAND_SCROLL(self.slider, self.slider.GetId(), self.OnScroll)
wxsizer.Add(self.slider,
flag=wx.ALIGN_CENTRE_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,
border=5)
self.scaledtext = wx.TextCtrl(wxparent)
if self.props['is_scaled_quantity']:
self.validator = wxvt.setup_validated_float_callback(
self.scaledtext, self.scaledtext.GetId(),
self.OnSetScaledValue,
ignore_initial_value=True)
else:
self.validator = wxvt.setup_validated_integer_callback(
self.scaledtext, self.scaledtext.GetId(),
self.OnSetRawValue,
ignore_initial_value=True)
wxsizer.Add(self.scaledtext)
self.auto_widget = wx.CheckBox(wxparent,-1,'auto')
wxsizer.Add(self.auto_widget)
num_auto_modes = (self.props['has_manual_mode'] +
self.props['has_auto_mode'])
self.auto_widget.SetValue(self.current_is_auto)
if num_auto_modes < 2:
self.auto_widget.Enable(False)
wx.EVT_CHECKBOX(
self.auto_widget, self.auto_widget.GetId(), self.OnToggleAuto)
self.other_updates = []
self.Update()
self.fview_app.register_property_query_callback(
self.prop_num, self.OnReceiveProperty)
def OnReceiveProperty(self, value, auto):
self.current_value, self.current_is_auto = value, auto
self.Update()
def AddToUpdate(self,ou):
self.other_updates.append( ou )
def Update(self, event=None):
self.slider.SetValue(self.current_value)
self.auto_widget.SetValue( self.current_is_auto )
if self.props['is_scaled_quantity']:
self.scaledtext.SetValue(
str(self.current_value*self.props['scale_gain']
+self.props['scale_offset']) )
else:
self.scaledtext.SetValue( str(self.current_value) )
self.validator.set_state('valid')
def OnScroll(self, event):
widget = event.GetEventObject()
new_value = widget.GetValue()
self.fview_app.enqueue_property_change(
(self.prop_num,new_value,self.current_is_auto) )
self.current_value = new_value
self._UpdateSelfAndOthers()
def OnSetScaledValue(self,event):
# we know this is a valid float
widget = event.GetEventObject()
new_value_scaled = float(widget.GetValue())
new_value = ((new_value_scaled-self.props['scale_offset']) /
self.props['scale_gain'])
new_value = int(round(new_value))
self.fview_app.enqueue_property_change(
(self.prop_num,new_value,self.current_is_auto) )
self.current_value = new_value
self._UpdateSelfAndOthers()
def OnSetRawValue(self,event):
# we know this is a valid int
widget = event.GetEventObject()
new_value = int(widget.GetValue())
self.fview_app.enqueue_property_change(
(self.prop_num,new_value,self.current_is_auto) )
self.current_value = new_value
self._UpdateSelfAndOthers()
def OnToggleAuto(self, event):
widget = event.GetEventObject()
set_auto = widget.IsChecked()
self.current_is_auto = set_auto
self.fview_app.enqueue_property_change(
(self.prop_num,self.current_value,set_auto) )
self._UpdateSelfAndOthers()
def _UpdateSelfAndOthers(self, event=None):
self.Update()
for ou in self.other_updates:
ou.Update()
class InitCameraDialog(wx.Dialog):
def __init__(self,*args,**kw):
cam_info = kw['cam_info']
del kw['cam_info']
wx.Dialog.__init__(self,*args,**kw)
sizer = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, "Select camera and parameters")
font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.NORMAL)
label.SetFont(font)
sizer.Add(label, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALL, 5)
label = wx.StaticText(
self, -1, "Note: this program does not support hotplugging.")
sizer.Add(label, 0, wx.ALIGN_CENTRE_HORIZONTAL|wx.ALL, 5)
del label
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
del line
# build flexgrid
ncols = 3
flexgridsizer = wx.FlexGridSizer( -1, ncols )
for i in range(ncols):
flexgridsizer.AddGrowableCol(i)
if 1:
label = wx.StaticText(self, -1, "Camera")
label.SetFont(font)
flexgridsizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
label = wx.StaticText(self, -1, "Number of\nframebuffers")
label.SetFont(font)
flexgridsizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
label = wx.StaticText(self, -1, "Video mode")
label.SetFont(font)
flexgridsizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.num_buffers = []
self.radios = []
for idx in range(len(cam_info)):
#label = wx.StaticText(self, -1, "Camera #%d:"%(idx+1,))
#flexgridsizer.Add(label, 0, wx.ALIGN_RIGHT|wx.TOP|wx.BOTTOM, 5)
if cam_info[idx] is not None:
this_cam_string = "%s %s (%s)"%(
str(cam_info[idx]['vendor']),
str(cam_info[idx]['model']),
str(cam_info[idx]['chip']))
else:
this_cam_string = '(unavailable camera)'
radio = wx.RadioButton( self, -1, this_cam_string )
self.radios.append(radio)
flexgridsizer.Add(radio, 0, wx.ALIGN_CENTRE)
if cam_info[idx] is not None:
num_buf_str = str(cam_info[idx]['num_buffers'])
else:
num_buf_str = '0'
text = wx.TextCtrl(self, -1, num_buf_str,
style=wx.TE_CENTRE)
wxvt.setup_validated_integer_callback(text,
text.GetId(),
None)
self.num_buffers.append(text)
flexgridsizer.Add(text, 0, wx.ALIGN_CENTRE)
if cam_info[idx] is not None:
mode_choice_strings=cam_info[idx]['mode_choice_strings']
else:
mode_choice_strings=['(no mode)']
mode_choice = wx.Choice(self, -1, choices=mode_choice_strings)
if cam_info[idx] is None:
radio.SetValue(False)
radio.Enable(False)
text.Enable(False)
mode_choice.Enable(False)
else:
radio.SetValue(True)
choice = 0
for i,mode_choice_string in enumerate(mode_choice_strings):
if 'DC1394_VIDEO_MODE_FORMAT7_0' in mode_choice_string:
if 'YUV422' in mode_choice_string:
choice = i
break
elif 'YUV411' in mode_choice_string:
choice = i
break
elif 'MONO8' in mode_choice_string:
choice = i
break
mode_choice.SetSelection(choice)
flexgridsizer.Add(mode_choice, 0, wx.ALIGN_CENTRE)
if cam_info[idx] is not None:
cam_info[idx]['mode_choice_control'] = mode_choice
sizer.Add(flexgridsizer, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
btnsizer = wx.BoxSizer()
btn = wx.Button(self, wx.ID_OK, "OK")
wx.EVT_BUTTON(btn, wx.ID_OK, self.OnOK)
btn.SetDefault()
btnsizer.Add(btn,0,flag=wx.LEFT | wx.RIGHT,border=5)
btn = wx.Button(self, wx.ID_CANCEL, "Cancel")
wx.EVT_BUTTON(btn, wx.ID_CANCEL, self.OnCancel)
btnsizer.Add(btn,0,0)
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.SetSizer(sizer)
sizer.Fit(self)
def OnOK(self,event):
self.SetReturnCode(wx.ID_OK)
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.SetReturnCode(wx.ID_CANCEL)
self.EndModal(wx.ID_CANCEL)
class BackendChoiceDialog(wx.Dialog):
def __init__(self, parent):
# see http://wiki.wxpython.org/index.cgi/TwoStageCreation
pre = wx.PreDialog()
RES.LoadOnDialog(pre, parent, "BACKEND_CHOICE_DIALOG")
self.PostCreate(pre)
if cam_iface is not None:
wxctrl = xrc.XRCCTRL(self,'CAM_IFACE_LOADED')
wxctrl.SetLabel('Warning: changes will have no effect '
'until this application is restarted.')
backend_choice = xrc.XRCCTRL(self,'BACKEND_CHOICE')
for wrapper,backends in (
cam_iface_choose.wrappers_and_backends.iteritems()):
for backend in backends:
if backend == 'blank' or backend=='dummy':
continue
backend_choice.Append('%s (%s)'%(backend,wrapper))
backend_choice.SetStringSelection('%s (%s)'%(rc_params['backend'],
rc_params['wrapper']))
wxctrl = xrc.XRCCTRL(self,'SAVE_BACKEND_CHOICE')
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(),
self.OnOK)
wxctrl = xrc.XRCCTRL(self,'CANCEL_BACKEND_CHOICE')
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(),
self.OnCancel)
self.new_backend_and_wrapper = None
def OnOK(self,event):
wxctrl = xrc.XRCCTRL(self,'BACKEND_CHOICE')
string_value = wxctrl.GetStringSelection()
backend, wrapper = string_value.split()
wrapper = wrapper[1:-1]
# convert from unicode
self.new_backend_and_wrapper = (str(backend),str(wrapper))
self.SetReturnCode(wx.ID_OK)
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
self.SetReturnCode(wx.ID_CANCEL)
self.EndModal(wx.ID_CANCEL)
def _need_cam_iface():
global cam_iface
if cam_iface is None:
wrapper = rc_params['wrapper']
backend = rc_params['backend']
cam_iface = cam_iface_choose.import_backend(backend,wrapper)
class _BlockingROSQuitThread(threading.Thread):
def __init__(self, wxapp, func):
threading.Thread.__init__(self, name="rosquitthread")
self.daemon = True
self._wxapp = wxapp
self._func = func
def run(self):
if self._func is None:
while 1:
time.sleep(0.1)
else:
self._func()
event = wx.CommandEvent(FViewShutdownEvent)
event.SetEventObject(self._wxapp)
wx.PostEvent(self._wxapp, event)
class App(wx.App):
def __init__(self, fview_options, **kwargs):
self.options = fview_options.pop('options')
self._fview_options = fview_options
#wx dies loudly in __init__ if the log file is not writable, so
#do stdio redirection ourselves... safely, and to a log file specific to
#the camera id
redirect = kwargs.pop('redirect')
filename = kwargs.pop('filename')
wx.App.__init__(self, **kwargs)
if redirect and filename:
print 'logging to %s' % filename
try:
self.RedirectStdio(filename)
except IOError:
filename = tempfile.mkstemp(prefix='fview',suffix='.log')[1]
self.RedirectStdio(filename)
self.redirect = redirect
self.log_filename = filename
_ros_quit_func = None
if self._fview_options['have_ros']:
import rospy
_ros_quit_func = rospy.spin
self.quit_thread = _BlockingROSQuitThread(self, _ros_quit_func)
self.quit_thread.start()
def OnInit(self,*args,**kw):
self.save_images = 0 # save every nth image, 0 = false
self.cam_ids = {}
self.exit_code = 0
self.grab_thread = None
self.shutdown_error_info = None
wx.InitAllImageHandlers()
self.frame = wx.Frame(None, -1, "FView",size=(640,480))
self.fview_ext_trig_plugin = None
self.xrcid2validator = {}
# statusbar ----------------------------------
self.statusbar = self.frame.CreateStatusBar()
self.statusbar.SetFieldsCount(3)
self.statusbar.SetStatusWidths([-1,150,20])
# menubar ------------------------------------
menuBar = wx.MenuBar()
# File menu
filemenu = wx.Menu()
if 0:
ID_open_cam_config = wx.NewId()
filemenu.Append(
ID_open_cam_config, "Open Camera Configuration...\tCtrl-O")
wx.EVT_MENU(self, ID_open_cam_config, self.OnOpenCamConfig)
ID_save_cam_config = wx.NewId()
filemenu.Append(
ID_save_cam_config, "Save Camera Configuration...\tCtrl-S")
wx.EVT_MENU(self, ID_save_cam_config, self.OnSaveCamConfig)
filemenu.AppendItem(wx.MenuItem(parentMenu=filemenu,
kind=wx.ITEM_SEPARATOR))
ID_set_record_dir = wx.NewId()
filemenu.Append(ID_set_record_dir, "set record Directory...\tCtrl-D")
wx.EVT_MENU(self, ID_set_record_dir, self.OnSetRecordDirectory)
self.record_dir = os.environ.get('FVIEW_SAVE_PATH','')
filemenu.AppendItem(wx.MenuItem(parentMenu=filemenu,
kind=wx.ITEM_SEPARATOR))
ID_quit = wx.NewId()
filemenu.Append(ID_quit, "Quit\tCtrl-Q", "Quit application")
wx.EVT_MENU(self, ID_quit, self.OnQuit)
#wx.EVT_CLOSE(self, ID_quit, self.OnQuit)
# JAB thinks this will allow use of the window-close ('x') button
# instead of forcing users to file->quit
menuBar.Append(filemenu, "&File")
# Camera menu
cameramenu = wx.Menu()
ID_init_camera = wx.NewId()
cameramenu.Append(ID_init_camera, "initialize camera...")
wx.EVT_MENU(self, ID_init_camera, self.OnInitCamera)
ID_set_backend_choice = wx.NewId()
cameramenu.Append(ID_set_backend_choice, "backend choice...")
wx.EVT_MENU(self, ID_set_backend_choice, self.OnBackendChoice)
menuBar.Append(cameramenu, "&Camera")
# view menu
viewmenu = wx.Menu()
ID_show_live_images = wx.NewId()
self.show_live_images = viewmenu.Append(ID_show_live_images, "show live images",
"Show live images from camera", wx.ITEM_CHECK)
viewmenu.Check(ID_show_live_images,True)
ID_rotate180 = wx.NewId()
viewmenu.Append(ID_rotate180, "rotate 180 degrees",
"Rotate camera view 1800 degrees", wx.ITEM_CHECK)
wx.EVT_MENU(self, ID_rotate180, self.OnToggleRotate180)
ID_flipLR = wx.NewId() # mirror
viewmenu.Append(ID_flipLR, "flip Left/Right",
"Flip image Left/Right", wx.ITEM_CHECK)
wx.EVT_MENU(self, ID_flipLR, self.OnToggleFlipLR)
self.update_view_num = -1
self.view_interval = rc_params['view_interval']
ID_set_view_interval = wx.NewId()
viewmenu.Append(ID_set_view_interval, "Set display update interval...")
wx.EVT_MENU(self, ID_set_view_interval, self.OnSetViewInterval)
menuBar.Append(viewmenu, "&View")
# windows menu
windowsmenu = wx.Menu()
ID_settings = wx.NewId()
windowsmenu.Append(ID_settings, "Camera controls...\tCtrl-C")
wx.EVT_MENU(self, ID_settings, self.OnOpenCameraControlsWindow)
menuBar.Append(windowsmenu, "&Windows")
# plugins menu
self._load_plugins()
del_plugins = []
if len(self.plugins):
windowsmenu.AppendItem(wx.MenuItem(parentMenu=windowsmenu,
kind=wx.ITEM_SEPARATOR))
for plugin in self.plugins:
plugin_name = plugin.get_plugin_name()
if hasattr(plugin,'set_all_fview_plugins'):
try:
plugin.set_all_fview_plugins(self.plugins)
except Exception,err:
formatted_error = traceback.format_exc(err)
traceback.print_exc(err,sys.stderr)
msg = 'While attempting to open the plugin "%s",\n' \
'FView encountered an error. The error is:\n\n' \
'%s\n\n' \
'More details:\n' \
'%s'%( plugin_name, err, formatted_error )
dlg = wx.MessageDialog(self.frame, msg,
'FView plugin error',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
del_plugins.append(plugin)
continue
ID_tmp = wx.NewId()
item_tmp = wx.MenuItem(windowsmenu, ID_tmp, plugin_name+'...')
windowsmenu.AppendItem(item_tmp)
self.plugin_dict[plugin].fview_menu_wx_item = item_tmp
wx.EVT_MENU(self, ID_tmp, self.plugin_dict[plugin].OnShowFrame)
for del_plugin in del_plugins:
del self.plugins[self.plugins.index(del_plugin)]
helpmenu = wx.Menu()
ID_helpmenu = wx.NewId()
helpmenu.Append(ID_helpmenu, "About")
wx.EVT_MENU(self, ID_helpmenu, self.OnAboutFView)
menuBar.Append(helpmenu, "&Help")
# finish menubar -----------------------------
self.frame.SetMenuBar(menuBar)
# main panel ----------------------------------
self.main_panel = my_loadpanel(self.frame,"APP_PANEL")
self.main_panel.SetFocus()
frame_box = wx.BoxSizer(wx.VERTICAL)
frame_box.Add(self.main_panel,1,wx.EXPAND)
self.frame.SetSizer(frame_box)
self.frame.Layout()
## # main panel
main_display_panel = xrc.XRCCTRL(self.main_panel,"MAIN_DISPLAY_PANEL")
box = wx.BoxSizer(wx.VERTICAL)
main_display_panel.SetSizer(box)
self.cam_image_canvas = video_module.DynamicImageCanvas(
main_display_panel,-1)
self.cam_image_canvas.x_border_pixels = 0
self.cam_image_canvas.y_border_pixels = 0
box.Add(self.cam_image_canvas,1,wx.EXPAND)
main_display_panel.SetAutoLayout(True)
main_display_panel.Layout()
# DONE WITH WX INIT STUFF
self.grabbed_fnt = []
self.thread_done = threading.Event()
self.max_priority_enabled = threading.Event()
self.quit_now = threading.Event()
#AR self.app_ready = threading.Event()
self.cam_fps_value = SharedValue()
self.framerate = SharedValue()
self.num_buffers = SharedValue()
self.last_measurement_time = time.time()
self.last_image = None
self.last_image_fullsize = (0,0)
self.last_offset = 0,0
self.new_image = False
self.fly_movie = None
# MORE WX STUFF
# camera control panel
self.cam_control_frame = wx.Frame(
self.frame, -1, "FView: Camera Control")
self.cam_control_panel = my_loadpanel(
self.cam_control_frame,"CAMERA_CONTROLS_PANEL")
self.cam_control_panel.Fit()
self.cam_control_frame.Fit()
self.cam_settings_panel = xrc.XRCCTRL(
self.cam_control_panel, "CAM_SETTINGS_PANEL")
self.cam_framerate_panel = xrc.XRCCTRL(
self.cam_control_panel, "CAM_FRAMERATE_PANEL")
self.cam_roi_panel = xrc.XRCCTRL(
self.cam_control_panel, "CAM_ROI_PANEL")
self.cam_record_panel = xrc.XRCCTRL(
self.cam_control_panel, "CAM_RECORD_PANEL")
# Camera framerate frame ----------------------------
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_FRAMERATE")
self.xrcid2validator["CAM_FRAMERATE"] = (
wxvt.setup_validated_float_callback(
wxctrl,
wxctrl.GetId(),
self.OnSetFramerate,
ignore_initial_value=True))
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_NUM_BUFFERS")
self.xrcid2validator["CAM_NUM_BUFFERS"] = (
wxvt.setup_validated_integer_callback(
wxctrl,
wxctrl.GetId(),
self.OnSetNumBuffers,
ignore_initial_value=True))
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "EXTERNAL_TRIGGER_MODE")
wx.EVT_CHOICE(wxctrl, wxctrl.GetId(), self.OnSetTriggerMode)
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_FRAMERATE_QUERY")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(), self.OnGetFramerate)
# Camera roi frame ----------------------------
self.ignore_text_events = True
self.roi_xrcids = [
"ROI_LEFT","ROI_RIGHT","ROI_BOTTOM","ROI_TOP",
"ROI_WIDTH","ROI_HEIGHT"]
for xrcid in self.roi_xrcids:
wxctrl = xrc.XRCCTRL( self.cam_roi_panel, xrcid)
validator = wxvt.setup_validated_integer_callback(
wxctrl, wxctrl.GetId(), self.OnSetROI,
ignore_initial_value=True)
self.xrcid2validator[xrcid] = validator
self.ignore_text_events = False
wxctrl = xrc.XRCCTRL( self.cam_roi_panel, "ROI_QUERY_CAMERA")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(), self.OnUpdateROIPanel)
wxctrl = xrc.XRCCTRL( self.cam_roi_panel, "ROI_FULL_FRAME")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(), self.OnFullFrameROI)
# Camera record frame ----------------------------
self.recording_fmf = None
wxctrl = xrc.XRCCTRL( self.cam_record_panel, "NTH_FRAME_TEXT")
wxvt.setup_validated_integer_callback(wxctrl,
wxctrl.GetId(),
self.OnNthFrameChange)
wxctrl = xrc.XRCCTRL( self.cam_record_panel, "save_fno_as_timestamp")
self.save_fno=wxctrl.IsChecked()
wx.EVT_CHECKBOX(wxctrl, wxctrl.GetId(), self.OnChangeSaveFNoAsTimestamp)
wxctrl = xrc.XRCCTRL( self.cam_record_panel,
"update_display_while_saving")
self.update_display_while_saving = wxctrl.IsChecked()
wx.EVT_CHECKBOX(wxctrl, wxctrl.GetId(),
self.OnToggleUpdateDisplayWhileSaving)
wxctrl = xrc.XRCCTRL( self.cam_record_panel, "START_RECORD_BUTTON")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(),
self.OnStartRecord)
wxctrl = xrc.XRCCTRL( self.cam_record_panel, "STOP_RECORD_BUTTON")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(),
self.OnStopRecord)
wxctrl = xrc.XRCCTRL( self.cam_record_panel,
'MOVIE_FNAME_PREFIX')
wxctrl.SetValue(rc_params['movie_fname_prefix'])
self.fname_prefix_validator = wxvt.Validator( wxctrl, wxctrl.GetId(),
self.OnFnamePrefix,
self.filename_validator_func,
ignore_initial_value=True)
# Set view options
viewmenu.Check(ID_rotate180,rc_params['rotate180'])
self.cam_image_canvas.set_rotate_180( viewmenu.IsChecked(ID_rotate180) )
for plugin in self.plugins:
if not hasattr(plugin,'set_view_rotate_180'):
print ('ERROR: plugin "%s" needs set_view_rotate_180() '
'method'%(plugin,))
continue
plugin.set_view_rotate_180( viewmenu.IsChecked(ID_rotate180) )
viewmenu.Check(ID_flipLR,rc_params['flipLR'])
self.cam_image_canvas.set_flip_LR( viewmenu.IsChecked(ID_flipLR) )
for plugin in self.plugins:
plugin.set_view_flip_LR( viewmenu.IsChecked(ID_flipLR) )
# finalize wx stuff
self.frame.SetAutoLayout(True)
self.frame.Show()
self.SetTopWindow(self.frame)
wx.EVT_CLOSE(self.frame, self.OnWindowClose)
self.cam_wait_msec_wait = 100
ID_Timer2 = wx.NewId()
self.timer2 = wx.Timer(self, ID_Timer2)
wx.EVT_TIMER(self, ID_Timer2, self.OnFPS)
self.update_interval2=5000
self.timer2.Start(self.update_interval2)
self.image_update_lock = threading.Lock()
self.save_info_lock = threading.Lock()
self.cam = None
self.update_display_while_saving = True
self.Connect(
-1, -1, CamPropertyDataReadyEvent, self.OnCameraPropertyDataReady )
self.Connect(
-1, -1, CamROIDataReadyEvent, self.OnCameraROIDataReady )
self.Connect(
-1, -1, CamFramerateReadyEvent, self.OnFramerateDataReady )
self.Connect(
-1, -1, FViewShutdownEvent, self.OnQuit )
self.Connect(
-1, -1, ImageReadyEvent, self.OnUpdateCameraView )
return True
def _load_plugins(self):
result = load_plugins(
self.frame,
use_plugins=self.options.plugins,
return_plugin_names=self.options.show_plugins,
**self._fview_options
)
if self.options.show_plugins:
#ensure this message goes to stdout and not the log file
#(because we were asked to do this via command line anyway)
self.RestoreStdio()
print 'plugin description'
print '------ -----------'
for i,plugin in enumerate(result):
print ' ',i,plugin
sys.exit(0)
plugins, plugin_dict, bad_plugins, plugin_names = result
if self.options.plugins is not None:
fail = False
for desired_plugin in self.options.plugins:
if desired_plugin not in plugin_names:
print 'ERROR: you requested plugin %r, but it was not loaded'%desired_plugin
fail = True
if fail:
sys.exit(1)
self.plugins = plugins
self.plugin_dict = plugin_dict
if len(bad_plugins):
for name, (err,full_err) in bad_plugins.iteritems():
msg = 'While attempting to open the plugin "%s",\n' \
'FView encountered an error. The error is:\n\n' \
'%s\n\n'%( name, err )
dlg = wx.MessageDialog(self.frame, msg,
'FView plugin error',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
if self.options.show_plugins:
print 'plugin description'
print '------ -----------'
for i,plugin in enumerate(self.plugins):
print ' ',i,plugin
sys.exit(0)
def OnAboutFView(self, event):
_need_cam_iface()
driver = cam_iface.get_driver_name()
wrapper = cam_iface.get_wrapper_name()
py_libinfo,c_libinfo = cam_iface.get_library_info()
disp = 'FView %s\n'%__version__
disp += '---------------------------------\n'
for k,v in self._fview_options.iteritems():
if k == 'options':
continue
else:
disp += '%s: %r\n' % (k,v)
disp += '\nlibcamiface details:\n'
disp += '\tdriver: %s\n\twrapper: %s\n'%(driver,wrapper)
disp += 'pylibcamiface:\n\tloaded: %s\n\tversion: %s\n' % (py_libinfo)
disp += ' libcamiface:\n\tloaded: %s\n\tversion: %s\n' % (c_libinfo)
disp += '\nplugin details:\n'
disp += '---------------------------------\n'
for plugin in self.plugins:
disp += plugin.get_plugin_name() + '\n'
dlg = wx.MessageDialog(self.frame, disp,
'About FView',
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnInitCamera(self, event):
try:
_need_cam_iface()
if self.cam is not None:
dlg = wx.MessageDialog(
self.frame, 'A camera may only be initialized once',
'FView error',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
return
driver_name = cam_iface.get_driver_name()
num_cameras = cam_iface.get_num_cameras()
cam_info = []
bad_cameras = False
for idx in range(num_cameras):
try:
vendor, model, chip = cam_iface.get_camera_info(idx)
mode_choice_strings = []
for mode_number in range( cam_iface.get_num_modes(idx) ):
mode_choice_strings.append(
cam_iface.get_mode_string(idx,mode_number))
# closes for loop
except cam_iface.CamIFaceError, err:
traceback.print_exc(err,sys.stderr)
bad_cameras = True
cam_info.append( None )
continue
cam_name_string = "num_buffers('%s','%s')"%(vendor,model)
if cam_name_string in rc_params:
num_buffers = rc_params[cam_name_string]
else:
if vendor == 'Basler' and model == 'A602f':
num_buffers = 100
elif vendor == 'Basler' and model == 'A622f':
num_buffers = 50
elif vendor == 'Unibrain' and model == 'Fire-i BCL 1.2':
num_buffers = 100
elif vendor == 'Unibrain' and model == 'Fire-i BBW 1.3':
num_buffers = 100
elif vendor == 'Point Grey Research' and model=='Scorpion':
num_buffers = 32
else:
num_buffers = 32
if sys.platform.startswith('win'):
num_buffers = 10 # for some reason, this seems to be the max, at least with CMU1394
cam_info.append( dict(vendor=vendor,
model=model,
chip=chip,
num_buffers=num_buffers,
mode_choice_strings=mode_choice_strings,
) )
dlg = InitCameraDialog(self.frame, -1, "Select camera & parameters",
size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE,
cam_info=cam_info)
res = dlg.ShowModal()
cam_no_selected = None
if res == wx.ID_OK and len(dlg.radios):
for idx in range(len(dlg.radios)):
if dlg.radios[idx].GetValue():
cam_no_selected = idx
num_buffers = int(dlg.num_buffers[idx].GetValue())
else:
return
#move logfile to be camera specific
if self.log_filename is not None:
self.log_filename = self.log_filename + ('.%d' % cam_no_selected)
if self.redirect:
self.RedirectStdio(self.log_filename)
vendor, model, chip = cam_iface.get_camera_info(cam_no_selected)
cam_name_string = "num_buffers('%s','%s')"%(vendor,model)
rc_params[cam_name_string] = num_buffers
save_rc_params()
# allocate 400 MB then delete, just to get some respec' from OS:
nx.zeros((400*1024,),nx.uint8)
assert cam_info[cam_no_selected] is not None
mode_choice = cam_info[cam_no_selected]['mode_choice_control']
mode_number = mode_choice.GetSelection()
try:
self.cam = cam_iface.Camera(cam_no_selected,
num_buffers,
mode_number
)
except cam_iface.CamIFaceError, x:
dlg = wx.MessageDialog(self.frame, str(x),
'Error opening camera',
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
raise
vendor, model, chip = cam_iface.get_camera_info(cam_no_selected)
self.cam_ids[self.cam] = chip
format = self.cam.get_pixel_coding()
self.statusbar.SetStatusText('Connected to %s %s (%s)'%(
vendor, model, format),0)
self.property_callback_funcs = {}
self.roi_callback_funcs = []
self.framerate_callback_funcs = []
# set external trigger modes
try:
trigger_mode = self.cam.get_trigger_mode_number()
except cam_iface.CamIFaceError:
print 'Error getting trigger mode number'
trigger_mode = None
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "EXTERNAL_TRIGGER_MODE")
for i in range(self.cam.get_num_trigger_modes()):
trigger_mode_string = self.cam.get_trigger_mode_string(i)
wxctrl.Append(trigger_mode_string)
if trigger_mode is not None:
wxctrl.SetSelection(trigger_mode)
else:
wxctrl.SetSelection(0)
self.register_framerate_query_callback( self.OnReceiveFramerate )
self.host_timestamp_ctrl = xrc.XRCCTRL( self.cam_framerate_panel,
"use_host_timestamps")
wx.EVT_CHECKBOX(self.host_timestamp_ctrl, self.host_timestamp_ctrl.GetId(),
self.OnUseHostTimestamps)
cphs = []
n_props = self.cam.get_num_camera_properties()
# info from grab thread to GUI thread
self.cam_prop_get_queue = Queue.Queue()
self.cam_roi_get_queue = Queue.Queue()
self.framerate_get_queue = Queue.Queue()
# commands from GUI thread to grab thread
self.cam_cmd_queue = Queue.Queue()
self._mainthread_roi = None
self.register_roi_query_callback( self.OnReceiveROI)
self.cam_cmd_queue.put( ('ROI query',None) )
self.cam_cmd_queue.put( ('framerate query',None) )
auto_cam_settings_panel = xrc.XRCCTRL(
self.cam_settings_panel, "AUTO_CAM_SETTINGS_PANEL")
acsp_sizer = wx.FlexGridSizer(n_props) # guesstimate
n_rows = 0
n_cols = 4
for prop_num in range(n_props):
auto_cam_settings_panel.Hide()
cph = CameraParameterHelper( self.cam,
auto_cam_settings_panel,
acsp_sizer,
prop_num,
self)
auto_cam_settings_panel.Show()
if cph.present:
n_rows += 1
cphs.append( cph )
if not len(cphs):
acsp_sizer.AddGrowableCol(0)
statictext = wx.StaticText(
auto_cam_settings_panel,
label='(No properties present on this camera)')
n_rows += 1
acsp_sizer.Add(statictext,1,flag=wx.ALIGN_CENTRE|wx.EXPAND)
else:
acsp_sizer.AddGrowableCol(1)
acsp_sizer.SetRows(n_rows)
acsp_sizer.SetCols(n_cols)
auto_cam_settings_panel.SetSizer(acsp_sizer)
auto_cam_settings_panel.Layout()
for cph in cphs:
non_self_cphs = [cph2 for cph2 in cphs if cph2 is not cph]
for cph2 in non_self_cphs:
cph.AddToUpdate( cph2 )
self.cam_param_helpers = cphs
wxctrl = xrc.XRCCTRL( self.cam_settings_panel, "QUERY_CAMERA_SETTINGS")
wx.EVT_BUTTON(wxctrl, wxctrl.GetId(),
self.OnQueryCameraSettings)
# query camera settings to initially fill window
#self.OnQueryCameraSettings(None)
# re-fit the camera control window
self.cam_control_panel.Fit()
self.cam_control_frame.Fit()
# send plugins information that camera is starting
format = self.cam.get_pixel_coding()
bad_plugins = []
for plugin in self.plugins:
try:
plugin.camera_starting_notification(
self.cam_ids[self.cam],
pixel_format=format,
max_width=self.cam.get_max_width(),
max_height=self.cam.get_max_height())
except Exception, err:
traceback.print_exc(err,sys.stderr)
if self.log_filename is None:
log_filename_str = ''
else:
log_filename_str = ' See\n\n%s'%(self.log_filename,)
msg = 'An FView plugin "%s" failed: %s\n\n'\
'The plugin will now be disabled, and '\
'the log will have more details.%s'%(
plugin.plugin_name,
str(err),log_filename_str)
dlg = wx.MessageDialog(self.frame,msg,
'FView plugin error',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
bad_plugins.append( plugin )
for bad_plugin in bad_plugins:
self.plugin_dict[bad_plugin].fview_menu_wx_item.Enable(False)
self.plugin_dict[bad_plugin].Destroy()
del self.plugins[ self.plugins.index(bad_plugin) ]
for plugin in self.plugins:
if plugin.get_plugin_name() == 'FView external trigger':
self.fview_ext_trig_plugin = plugin
if self.fview_ext_trig_plugin.trigger_device.real_device:
self.cam_cmd_queue.put(('timestamp source','CamTrig'))
self.host_timestamp_ctrl.Enable(False)
self.pixel_coding = format
self.cam_max_width = self.cam.get_max_width()
self.cam_max_height = self.cam.get_max_height()
# start threads
grab_thread = threading.Thread( target=grab_func,
args=(self,
self.image_update_lock,
self.cam,
self.cam_ids[self.cam],
self.max_priority_enabled,
self.quit_now,
self.thread_done,
self.cam_fps_value,
self.framerate,
self.num_buffers,
self.plugins,
self.cam_prop_get_queue,
self.cam_roi_get_queue,
self.framerate_get_queue,
self.cam_cmd_queue,
self.fview_ext_trig_plugin,
))
grab_thread.setDaemon(True)
grab_thread.start()
self.grab_thread = grab_thread
save_thread = threading.Thread( target=save_func,
args=(self,
self.save_info_lock,
self.quit_now,
))
save_thread.setDaemon(True)
save_thread.start()
except Exception,err:
if self.log_filename is None:
log_filename_str = ''
else:
log_filename_str = '%s\n\n'%(self.log_filename,)
dlg = wx.MessageDialog(
self.frame, ('An unknown error accessing the camera was '
'encountered. The log file will have details. '
'\n\n%sFView will now exit. The error '
'was:\n%s'%(log_filename_str,str(err),)),
'FView error',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
traceback.print_exc(err,sys.stderr)
self.exit_code = 1
self.OnQuit()
def register_property_query_callback( self, prop_num, callback_func):
self.property_callback_funcs.setdefault(prop_num,[]).append(
callback_func)
def OnCameraPropertyDataReady(self, event):
try:
while 1:
data = self.cam_prop_get_queue.get_nowait()
(prop_num, current_value, is_set) = data
for cb_func in self.property_callback_funcs.get(prop_num,[]):
cb_func( current_value, is_set )
except Queue.Empty:
pass
def register_roi_query_callback(self, callback_func):
self.roi_callback_funcs.append(callback_func)
def register_framerate_query_callback(self, callback_func):
self.framerate_callback_funcs.append(callback_func)
def OnCameraROIDataReady(self, event):
try:
while 1:
data = self.cam_roi_get_queue.get_nowait()
(l,b,r,t) = data
for cb_func in self.roi_callback_funcs:
cb_func( l,b,r,t )
except Queue.Empty:
pass
def OnFramerateDataReady(self, event):
try:
while 1:
data = self.framerate_get_queue.get_nowait()
current_framerate, trigger_mode, num_buffers = data
for cb_func in self.framerate_callback_funcs:
cb_func( current_framerate, trigger_mode, num_buffers )
except Queue.Empty:
pass
def enqueue_property_change( self, cmd):
self.cam_cmd_queue.put( ('property change',cmd) )
def OnSetViewInterval(self,event):
dlg=wx.TextEntryDialog(self.frame, 'Display every Nth frame, where N is:',
'Set view interval',str(self.view_interval))
try:
if dlg.ShowModal() == wx.ID_OK:
interval = int(dlg.GetValue())
rc_params['view_interval'] = interval
save_rc_params()
self.view_interval = interval
finally:
dlg.Destroy()
def OnSetRecordDirectory(self, event):
dlg = wx.DirDialog( self.frame, "Movie record directory",
style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON,
defaultPath = self.record_dir,
)
try:
if dlg.ShowModal() == wx.ID_OK:
self.record_dir = dlg.GetPath()
finally:
dlg.Destroy()
def OnFnamePrefix(self, event):
widget = event.GetEventObject()
new_value = widget.GetValue()
rc_params['movie_fname_prefix'] = new_value
save_rc_params()
def filename_validator_func(self,input_string):
# Could complain about invalid filename characters here...
return True
def OnBackendChoice(self, event):
dlg = BackendChoiceDialog(self.frame)
try:
if dlg.ShowModal() == wx.ID_OK:
if dlg.new_backend_and_wrapper is not None:
backend,wrapper = dlg.new_backend_and_wrapper
rc_params['wrapper'] = wrapper
rc_params['backend'] = backend
save_rc_params()
finally:
dlg.Destroy()
def OnQueryCameraSettings(self, event):
self.cam_cmd_queue.put( ('property query',None) )
def OnToggleRotate180(self, event):
self.cam_image_canvas.set_rotate_180( event.IsChecked() )
rc_params['rotate180'] = event.IsChecked()
save_rc_params()
for plugin in self.plugins:
if not hasattr(plugin,'set_view_rotate_180'):
print ('ERROR: plugin "%s" needs set_view_rotate_180() '
'method'%(plugin,))
continue
plugin.set_view_rotate_180( event.IsChecked() )
def OnToggleFlipLR(self, event):
self.cam_image_canvas.set_flip_LR( event.IsChecked() )
rc_params['flipLR'] = event.IsChecked()
save_rc_params()
for plugin in self.plugins:
plugin.set_view_flip_LR( event.IsChecked() )
def OnSetFramerate(self, event):
if self.ignore_text_events:
return
widget = event.GetEventObject()
fr_string = widget.GetValue()
try:
fr = float(fr_string)
except ValueError:
return
self.framerate.set(fr)
def OnReceiveFramerate(self,framerate,trigger_mode,num_buffers):
self.ignore_text_events = True
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_FRAMERATE")
wxctrl.SetValue(str(framerate))
self.xrcid2validator["CAM_FRAMERATE"].set_state('valid')
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_NUM_BUFFERS")
wxctrl.SetValue(str(num_buffers))
self.xrcid2validator["CAM_NUM_BUFFERS"].set_state('valid')
self.ignore_text_events = False
wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "EXTERNAL_TRIGGER_MODE")
if trigger_mode is not None:
wxctrl.SetSelection(trigger_mode)
else:
wxctrl.SetSelection(0)
def OnGetFramerate(self, event):
self.cam_cmd_queue.put( ('framerate query',None) )
## #framerate=self.cam.get_framerate()#XXX bad to cross thread boundary!
### num_buffers = self.cam.get_num_framebuffers()
#### try:
#### trigger_mode = self.cam.get_trigger_mode_number()
#### except cam_iface.CamIFaceError:
#### trigger_mode = None
## trigger_mode = None
## self.ignore_text_events = True
#### wxctrl = xrc.XRCCTRL( self.cam_framerate_panel, "CAM_NUM_BUFFERS")
#### wxctrl.SetValue(str(num_buffers))
#### self.xrcid2validator["CAM_NUM_BUFFERS"].set_state('valid')
## self.ignore_text_events = False
## wxctrl=xrc.XRCCTRL(self.cam_framerate_panel, "EXTERNAL_TRIGGER_MODE")
## if trigger_mode is not None:
## wxctrl.SetSelection(trigger_mode)
## else:
## wxctrl.SetSelection(0)
def OnSetNumBuffers(self, event):
if self.ignore_text_events:
return
widget = event.GetEventObject()
fr_string = widget.GetValue()
try:
fr = int(fr_string)
except ValueError:
return
self.num_buffers.set(fr)
def OnSetTriggerMode(self,event):
widget = event.GetEventObject()
val = widget.GetSelection()
self.cam_cmd_queue.put(('TriggerMode Set',val))
def OnUseHostTimestamps(self,event):
widget = event.GetEventObject()
val = widget.IsChecked()
if val:
self.cam_cmd_queue.put(('timestamp source','host clock'))
else:
self.cam_cmd_queue.put(('timestamp source','camera driver'))
def OnSetROI(self, event):
if self.ignore_text_events:
return
widget = event.GetEventObject()
widget_left = xrc.XRCCTRL( self.cam_roi_panel, "ROI_LEFT" )
widget_bottom = xrc.XRCCTRL( self.cam_roi_panel, "ROI_BOTTOM" )
widget_right = xrc.XRCCTRL( self.cam_roi_panel, "ROI_RIGHT" )
widget_top = xrc.XRCCTRL( self.cam_roi_panel, "ROI_TOP" )
widget_width = xrc.XRCCTRL( self.cam_roi_panel, "ROI_WIDTH" )
widget_height = xrc.XRCCTRL( self.cam_roi_panel, "ROI_HEIGHT" )
if widget in (widget_right,widget_top):
is_right_top = True
else:
is_right_top = False
####
l = int(widget_left.GetValue())
b = int(widget_bottom.GetValue())
if is_right_top:
r = int(widget_right.GetValue())
t = int(widget_top.GetValue())
if r<l: return
if t<b: return
w = r-l
h = t-b
else:
w = int(widget_width.GetValue())
h = int(widget_height.GetValue())
r = l+w
t = b+h
####
if (l>=0 and r<=self.cam_max_width and b>=0 and t<=self.cam_max_height):
lbrt = l,b,r,t
self.cam_cmd_queue.put(('ROI set',lbrt))
self.ignore_text_events = True # prevent infinite recursion
widget_left.SetValue(str(l))
self.xrcid2validator["ROI_LEFT"].set_state('valid')
widget_bottom.SetValue(str(b))
self.xrcid2validator["ROI_BOTTOM"].set_state('valid')
widget_right.SetValue(str(r))
self.xrcid2validator["ROI_RIGHT"].set_state('valid')
widget_top.SetValue(str(t))
self.xrcid2validator["ROI_TOP"].set_state('valid')
widget_width.SetValue(str(w))
self.xrcid2validator["ROI_WIDTH"].set_state('valid')
widget_height.SetValue(str(h))
self.xrcid2validator["ROI_HEIGHT"].set_state('valid')
self.ignore_text_events = False
else:
print 'ignoring invalid ROI command',l,b,r,t
self.OnUpdateROIPanel() # reset wx indicators
def OnFullFrameROI(self,event):
lbrt = 0,0,self.cam_max_width,self.cam_max_height
self.cam_cmd_queue.put(('ROI set',lbrt))
def OnReceiveROI(self,l,b,r,t):
self._mainthread_roi = (l,b,r,t)
self.OnQueryROI()
self.OnUpdateROIPanel()
def _get_lbrt(self):
return self._mainthread_roi
def OnQueryROI(self, event=None):
lbrt=self._get_lbrt()
self.cam_image_canvas.set_lbrt('camera',lbrt)
# it's a hack to put this here, but doesn't really harm anything
if self.max_priority_enabled.isSet():
self.statusbar.SetStatusText('+',2)
else:
self.statusbar.SetStatusText('-',2)
def OnUpdateROIPanel(self, event=None):
result=self._get_lbrt()
if result is None:
return
l,b,r,t = result
self.ignore_text_events = True
xrc.XRCCTRL( self.cam_roi_panel, "ROI_LEFT" ).SetValue(str(l))
self.xrcid2validator["ROI_LEFT"].set_state('valid')
xrc.XRCCTRL( self.cam_roi_panel, "ROI_BOTTOM" ).SetValue(str(b))
self.xrcid2validator["ROI_BOTTOM"].set_state('valid')
xrc.XRCCTRL( self.cam_roi_panel, "ROI_RIGHT" ).SetValue(str(r))
self.xrcid2validator["ROI_RIGHT"].set_state('valid')
xrc.XRCCTRL( self.cam_roi_panel, "ROI_TOP" ).SetValue(str(t))
self.xrcid2validator["ROI_TOP"].set_state('valid')
xrc.XRCCTRL( self.cam_roi_panel, "ROI_WIDTH" ).SetValue(str(r-l))
self.xrcid2validator["ROI_WIDTH"].set_state('valid')
xrc.XRCCTRL( self.cam_roi_panel, "ROI_HEIGHT" ).SetValue(str(t-b))
self.xrcid2validator["ROI_HEIGHT"].set_state('valid')
self.ignore_text_events = False
def OnChangeSaveFNoAsTimestamp(self, event):
self.save_fno=event.IsChecked()
def OnToggleUpdateDisplayWhileSaving(self,event):
self.update_display_while_saving = event.IsChecked()
def OnNthFrameChange(self,event):
pass # do nothing
def OnStartRecord(self, event):
if not self.save_images:
prefix = xrc.XRCCTRL( self.cam_record_panel,
'MOVIE_FNAME_PREFIX').GetValue()
nth_frame_ctrl = xrc.XRCCTRL(
self.cam_record_panel, "NTH_FRAME_TEXT")
try:
nth_frame = int(nth_frame_ctrl.GetValue())
if nth_frame < 1:
raise ValueError('only values >=1 allowed')
except ValueError,err:
dlg = wx.MessageDialog(
self.frame, 'Nth frame setting warning:\n %s'%(str(err),),
'FView warning',
wx.OK | wx.ICON_INFORMATION
)
dlg.Show()
nth_frame = 1
filename = prefix + time.strftime( '%Y%m%d_%H%M%S.fmf' )
fullpath = os.path.join( self.record_dir, filename )
self.start_streaming(fullpath,nth_frame)
if nth_frame == 1:
self.statusbar.SetStatusText('saving to %s'%(filename,),0)
else:
self.statusbar.SetStatusText(
'saving to %s (every 1 of %d frames)'%(filename,nth_frame)
,0)
def OnStopRecord(self, event):
if self.save_images:
self.stop_streaming()
self.statusbar.SetStatusText('',0)
def OnFPS(self, evt):
if self.grab_thread is not None:
if not self.grab_thread.isAlive():
self.grab_thread = None # only show this once
if self.log_filename is None:
log_filename_str = ''
else:
log_filename_str = '%s\n\n'%(self.log_filename,)
dlg = wx.MessageDialog(
self.frame,
'the camera thread appears to have died unexpectedly. '
'The log file will have more details.\n\n%s' % log_filename_str,
'FView Error',
wx.OK | wx.ICON_ERROR)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
fps_value = self.cam_fps_value
if fps_value.is_new_value_waiting():
fps,good_fps = fps_value.get_nowait()
if fps==good_fps:
self.statusbar.SetStatusText('~%.1f fps'%(fps,),1)
else:
self.statusbar.SetStatusText('~%.1f/%.1f fps'%(good_fps,fps),1)
def OnOpenCameraControlsWindow(self, evt):
self.cam_control_frame.Show(True)
self.cam_control_frame.Raise()
wx.EVT_CLOSE(self.cam_control_frame, self.OnCloseCameraControlsWindow)
def OnCloseCameraControlsWindow(self, evt):
self.cam_control_frame.Show(False)
def OnUpdateCameraView(self, evt):
#AR self.app_ready.set() # tell grab thread to start
try:
self.update_view_num += 1
if (self.update_view_num % self.view_interval) != 0:
return
if USE_DEBUG:
sys.stdout.write('R')
sys.stdout.flush()
if self.save_images:
if not self.update_display_while_saving:
return
if not self.show_live_images.IsChecked():
return
# copy stuff ASAP
self.image_update_lock.acquire()
if self.new_image:
new_image = True
last_image = self.last_image
last_fullsize = self.last_image_fullsize
last_offset = self.last_offset
self.new_image = False
points = self.plugin_points
linesegs = self.plugin_linesegs
else:
new_image = False
# release lock ASAP
self.image_update_lock.release()
# now draw
if new_image:
last_image = nx.asarray(last_image) # convert to numpy view
fullw,fullh = last_fullsize
if last_image.shape != (fullh,fullw):
xoffset=last_offset[0]
yoffset=last_offset[1]
h,w=last_image.shape
linesegs.extend(
[(xoffset, yoffset,
xoffset, yoffset+h),
(xoffset, yoffset+h,
xoffset+w, yoffset+h),
(xoffset+w, yoffset+h,
xoffset+w, yoffset),
(xoffset+w, yoffset,
xoffset, yoffset),
] )
self.cam_image_canvas.update_image_and_drawings(
'camera',
last_image,
format=self.pixel_coding,
points=points,
linesegs=linesegs,
xoffset=last_offset[0],
yoffset=last_offset[1],
)
self.cam_image_canvas.Refresh(eraseBackground=False)
except Exception,err:
if self.log_filename is None:
log_filename_str = ''
else:
log_filename_str = '%s\n\n'%(self.log_filename,)
self.shutdown_error_info=(
('An unknown error updating the screen was '
'encountered. The log file will have details. '
'\n\n%sFView will now exit. The error '
'was:\n%s'%(log_filename_str,str(err),)),
'FView error',)
self.exit_code = 1
event = wx.CommandEvent(FViewShutdownEvent)
event.SetEventObject(self)
wx.PostEvent(self, event)
raise
def OnWindowClose(self, event):
self.timer2.Stop()
self.quit_now.set()
for plugin in self.plugins:
plugin.quit()
self.thread_done.wait(0.1) # block until grab thread is done...
event.Skip() # propagate event up the chain...
def OnQuit(self, dummy_event=None):
self.quit_now.set()
# normal or error exit
if self.shutdown_error_info is not None:
msg,title = self.shutdown_error_info
dlg = wx.MessageDialog(self.frame,msg,title,
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
self.frame.Close() # results in call to OnWindowClose()
if self.exit_code != 0:
sys.exit(self.exit_code)
def start_streaming(self,filename,nth_frame):
# XXX bad to cross thread boundary!
format = self.cam.get_pixel_coding()
depth = self.cam.get_pixel_depth()
assert (self.cam.get_pixel_depth() % 8 == 0)
self.save_info_lock.acquire()
self.fly_movie = FlyMovieFormat.FlyMovieSaver(filename,
version=3,
format=format,
bits_per_pixel=depth,
)
self.save_images = nth_frame
self.save_info_lock.release()
def stop_streaming(self):
self.save_info_lock.acquire()
self.save_images = False
self.fly_movie.close()
self.fly_movie = None
self.save_info_lock.release()
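def _example_write_fmf(filename, frames, timestamps):
    # Illustrative sketch only (not called by FView): write a sequence of
    # 8-bit mono frames to an .fmf file, mirroring start_streaming() above.
    # The 'MONO8' coding string and the add_frame() call are assumptions
    # about the FlyMovieFormat saver API; frames/timestamps are
    # hypothetical inputs.
    fmf = FlyMovieFormat.FlyMovieSaver(filename,
                                       version=3,
                                       format='MONO8',
                                       bits_per_pixel=8)
    try:
        for frame, timestamp in zip(frames, timestamps):
            fmf.add_frame(frame, timestamp)
    finally:
        fmf.close()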
def main():
global cam_iface
if int(os.environ.get('FVIEW_NO_REDIRECT','0')):
log_filename = None
redirect = False
else:
#Try first in ~/.cache (on modern linux systems)
home = os.path.expanduser('~')
log = 'fview.log'
if os.path.isdir(os.path.join(home,'.cache')):
log_filename = os.path.join(home,'.cache',log)
else:
log_filename = os.path.join(home,log)
redirect = True
ros_msg = ""
have_ros = False
if not int(os.environ.get('FVIEW_NO_ROS', '0')):
try:
import roslib
import roslib.packages
roslib.load_manifest('rospy')
import rospy
#/run_id is always set, so attempting to get this parameter checks that roscore is reachable
rospy.get_param('/run_id')
rospy.init_node('fview', anonymous=True, disable_signals=True)
have_ros = True
ros_msg = "ROS enabled: version '%s' detected" % rospy.get_param('/rosdistro', 'unknown')
except ImportError:
#no roslib
ros_msg = "ROS disabled: no roslib installed"
except KeyError, e:
if '/run_id' in str(e):
#no /run_id parameter (roscore needs to be restarted). You probably
#did rosparam delete /
ros_msg = "ROS disabled: roscore needs restarting (missing /run_id)"
else:
#ROS_ROOT or some other ROS environment variable missing
ros_msg = "ROS disabled: invalid install - %s not found" % e
except socket.error:
#roscore not running (the get_param call failed)
ros_msg = "ROS disabled: roscore not running"
except Exception, e:
if 'package' in str(e):
ros_msg = "ROS disabled: could not find rospy package"
else:
ros_msg = "ROS disabled: unknown error: %s" % e
print ros_msg
kw = dict(redirect=redirect,filename=log_filename)
usage = '%prog [options]'
parser = OptionParser(usage)
parser.add_option("--plugins", type='string',
help="choose multiple plugins (e.g. '2,3')",
default=None)
parser.add_option("--show-plugins", action='store_true',
help="show plugin numbers and names (then quit)",
default=False)
(options, args) = parser.parse_args()
if options.plugins is not None:
options.plugins = [p for p in options.plugins.split(',') if p != '']
fview_options = {
"options":options,
"have_ros":have_ros,
"have_opengl":have_opengl
}
app = App(fview_options,**kw)
if 0:
# run under profiler
import hotshot
prof = hotshot.Profile("fview.hotshot")
res = prof.runcall(app.MainLoop)
prof.close()
else:
# run normally
app.MainLoop()
if hasattr(cam_iface,'shutdown'):
cam_iface.shutdown()
if __name__=='__main__':
main()
|
motmot/fview
|
motmot/fview/fview.py
|
Python
|
bsd-3-clause
| 81,270
|
[
"Firefly"
] |
536f28aeaefbc2dc55ecc2fa3de8e74780f021946fb0d6324c0b14ef0480aca6
|
from .album import AlbumGenerator, Album
from .band import BandGenerator, Band
"""
<li><a href="http://www.fantasynamegenerators.com/afterlife-names.php">Afterlife Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/alliance-names.php">Alliance Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/animal-group-names.php">Animal Group Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/apocalypse-names.php">Apocalypse Names</a></li>
<li style="font-size: 100%; padding: 3px 0.5%;">Armor Names >
<ol>
<li><a href="http://www.fantasynamegenerators.com/belt-names.php">Belts</a></li>
<li><a href="http://www.fantasynamegenerators.com/boots-names.php">Boots</a></li>
<li><a href="http://www.fantasynamegenerators.com/vambrace-names.php">Bracers</a></li>
<li><a href="http://www.fantasynamegenerators.com/chest-names.php">Chests</a></li>
<li><a href="http://www.fantasynamegenerators.com/cloak-names.php">Cloaks</a></li>
<li><a href="http://www.fantasynamegenerators.com/gauntlet-names.php">Gloves & Gauntlets</a></li>
<li><a href="http://www.fantasynamegenerators.com/helmet-names.php">Helmets</a></li>
<li><a href="http://www.fantasynamegenerators.com/leg-names.php">Legs</a></li>
<li><a href="http://www.fantasynamegenerators.com/pauldron-names.php">Pauldrons</a></li>
<li><a href="http://www.fantasynamegenerators.com/shield-names.php">Shields</a></li>
</ol>
</li>
<li><a href="http://www.fantasynamegenerators.com/army-names.php">Army Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/dwarf-army-names.php">Army Names (Dwarf) <span class="red">- New!</span></a></li>
<li><a href="http://www.fantasynamegenerators.com/artifact-names.php">Artifact Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/artwork-names.php">Artwork Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/attack-move-names.php">Attack Move Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/anime-attack-names.php">Attack Names (Anime)</a></li>
<li><a href="http://www.fantasynamegenerators.com/award-names.php">Award Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/battle-names.php">Battle Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/board-game-names.php">Board Game Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/book-title-generator.php">Book Titles</a></li>
<li><a href="http://www.fantasynamegenerators.com/bouquet-names.php">Bouquet Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/boxer-names.php">Boxer Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/brand-names.php">Brand Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/bug-species-names.php">Bug Species Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/candy-names.php">Candy Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/chivalric-order-names.php">Chivalric Order Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/clothing-brand-names.php">Clothing Brand Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/clown-names.php">Clown Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/color-names.php">Color Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/computer-virus-names.php">Computer Virus Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/console-names.php">Console Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/constellation-names.php">Constellation Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/council-names.php">Council Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/crop-names.php">Crop Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/currency-names.php">Currency Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/curse-names.php">Curse Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/dance-names.php">Dance Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/date-names.php">Date Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/disease-names.php">Disease Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/magical-disease-names.php">Disease (Magical) Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/scientific-disease-names.php">Disease (Scientific) Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/dinosaur-names.php">Dinosaur Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/drink-names.php">Drink Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/drug-names.php">Drug Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/enchantment-names.php">Enchantment Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/gear-enchantment-names.php">Enchanted Gear Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/energy-types.php">Energy Types</a></li>
<li><a href="http://www.fantasynamegenerators.com/epithet-generator.php">Epithets</a></li>
<li><a href="http://www.fantasynamegenerators.com/evil-group-names.php">Evil Organizations</a></li>
<li><a href="http://www.fantasynamegenerators.com/magical-plant-names.php">Fantasy Plant Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/magical-tree-names.php">Fantasy Tree Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/profession-names.php">Fantasy Profession Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/food-names.php">Food Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/fantasy-food-names.php">Food Names (Fantasy)</a></li>
<li><a href="http://www.fantasynamegenerators.com/fruit-vegetable-names.php">Fruit & Veg. Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/fungi_names.php">Fungus Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/galaxy-names.php">Galaxy Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/game-engine-names.php">Game Engine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/game-soundtrack-names.php">Game Soundtrack Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/gang-names.php">Gang / Clan Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/gem-mineral-names.php">Gemstone/Mineral</a></li>
<li><a href="http://www.fantasynamegenerators.com/graffiti-tags.php">Graffiti Tags</a></li>
<li><a href="http://www.fantasynamegenerators.com/guild_names.php">Guild / Clan Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/hacker-names.php">Hacker Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/heist-names.php">Heist Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/herb-names.php">Herb & Spice Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/holiday-names.php">Holiday Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/holy-book-names.php">Holy Book Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/human-species-names.php">Human Species Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/instrument-names.php">Instrument Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/invention-names.php">Invention Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/jewelry-names.php">Jewelry Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/language-names.php">Language Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/love-nicknames.php">Love nicknames</a></li>
<li><a href="http://www.fantasynamegenerators.com/magazine-names.php">Magazine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/magic-types.php">Magic Types</a></li>
<li><a href="http://www.fantasynamegenerators.com/martial-arts-names.php">Martial Arts Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/mascot-names.php">Mascot Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/material-names.php">Material Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/medicine-names.php">Medicine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/metal_names.php">Metal/Element Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/military-division-names.php">Military Division Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/military-honor-names.php">Military Honor Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/military-operation-names.php">Military Operation Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/military-rank-names.php">Military Rank Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/mobster-names.php">Mobster Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/molecule-names.php">Molecule Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/motorcycle-club-names.php">Motorcycle Clubs</a></li>
<li><a href="http://www.fantasynamegenerators.com/motorsport-race-names.php">Motorsport Races</a></li>
<li><a href="http://www.fantasynamegenerators.com/album-names.php">Music Album Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/band-names.php">Music Band Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/musician-names.php">Musician Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/mutant-plant-names.php">Mutant Plant Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/natural-disaster-names.php">Natural Disaster Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/newspaper-names.php">Newspaper Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/nicknames.php">Nicknames</a></li>
<li><a href="http://www.fantasynamegenerators.com/noble-house-names.php">Noble House Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/pirate-crew-names.php">Pirate Crew Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/plague-names.php">Plague Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/tree_names.php">Plant and Tree Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/class-names.php">Player Class & NPC Types</a></li>
<li><a href="http://www.fantasynamegenerators.com/poison-names.php">Poison Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/political-party-names.php">Political Party Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/post-apocalyptic-society-names.php">Post-Apocalyptic Society</a></li>
<li><a href="http://www.fantasynamegenerators.com/potion-names.php">Potion Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/racer-names.php">Racer Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/railway-names.php">Railway Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/rank-names.php">Rank Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/religion-names.php">Religion Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/satellite-names.php">Satellite Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/bird_names.php">Scientific Bird Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/scientific-creature-names.php">Scientific Creature Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/plant_names.php">Scientific Plant Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/magic-school-book-names.php">School Book Names (Magic)</a></li>
<li><a href="http://www.fantasynamegenerators.com/siege-engine-names.php">Siege Engine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/software-names.php">Software Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/song-title-generator.php">Song Titles</a></li>
<li><a href="http://www.fantasynamegenerators.com/space-fleet-names.php">Space Fleet Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/spell-names.php">Spell Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/sport-names.php">Sport Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/sports-team-names.php">Sports Team Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/squad-names.php">Squad Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/steampunk-walker-names.php">Steampunk Walker Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/superpowers.php">Superpowers</a></li>
<li><a href="http://www.fantasynamegenerators.com/teleportation-names.php">Teleportation Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/theme-park-rides.php">Theme Park Ride Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/throne-names.php">Throne Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/time-period-names.php">Time Period Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/title-names.php">Title Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/tool-nicknames.php">Tool Nicknames</a></li>
<li><a href="http://www.fantasynamegenerators.com/treaty-names.php">Treaty Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/tribal-names.php">Tribal Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/tribe-names.php">Tribe Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/username-generator.php">Usernames</a></li>
<li style="font-size: 100%; padding: 3px 0.5%;">Vehicle Names >
<ol>
<li><a href="http://www.fantasynamegenerators.com/airplane-names.php">Airplane Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/airship-names.php">Airship Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/car-names.php">Car Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/helicopter-names.php">Helicopter Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/military-vehicle-names.php">Military Vehicle Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/pirate-ship-names.php">Pirate Ship Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/ship-names.php">Ship Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/spaceship-names.php">Spaceship Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/submarine-names.php">Submarine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/tank-names.php">Tank Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/vehicle-names.php">Vehicle Names</a></li>
</ol>
</li>
<li><a href="http://www.fantasynamegenerators.com/video-game-names.php">Video Game Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/vocal-group-names.php">Vocal Group Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/weapon-abilities.php">Weapon Abilities</a></li>
<li style="font-size: 100%; padding: 3px 0.5%;">Weapon Names >
<ol>
<li><a href="http://www.fantasynamegenerators.com/battle-axe-names.php">Battle Axe Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/bomb-missile-names.php">Bombs & Missiles</a></li>
<li><a href="http://www.fantasynamegenerators.com/bow-names.php">Bows & Crossbows</a></li>
<li><a href="http://www.fantasynamegenerators.com/claw-weapon-names.php">Claws</a></li>
<li><a href="http://www.fantasynamegenerators.com/dagger-names.php">Daggers</a></li>
<li><a href="http://www.fantasynamegenerators.com/dual-wield-names.php">Dual Wielding</a></li>
<li><a href="http://www.fantasynamegenerators.com/fist-weapon-names.php">Fist Weapons</a></li>
<li><a href="http://www.fantasynamegenerators.com/flail-names.php">Flails & Maces</a></li>
<li><a href="http://www.fantasynamegenerators.com/magic-book-names.php">Magic Books</a></li>
<li><a href="http://www.fantasynamegenerators.com/magic-weapon-names.php">Magic Weapons</a></li>
<li><a href="http://www.fantasynamegenerators.com/pistol-names.php">Pistols</a></li>
<li><a href="http://www.fantasynamegenerators.com/rifle-names.php">Rifles</a></li>
<li><a href="http://www.fantasynamegenerators.com/sci-fi-gun-names.php">Sci-Fi Guns</a></li>
<li><a href="http://www.fantasynamegenerators.com/shotgun-names.php">Shotguns</a></li>
<li><a href="http://www.fantasynamegenerators.com/spear-names.php">Spears & Halberds</a></li>
<li><a href="http://www.fantasynamegenerators.com/staff-names.php">Staves</a></li>
<li><a href="http://www.fantasynamegenerators.com/sword-names.php">Swords</a></li>
<li><a href="http://www.fantasynamegenerators.com/throwing-weapon-names.php">Throwing Weapons</a></li>
<li><a href="http://www.fantasynamegenerators.com/war-hammer-names.php">War Hammers</a></li>
<li><a href="http://www.fantasynamegenerators.com/scythe-names.php">War Scythes</a></li>
<li><a href="http://www.fantasynamegenerators.com/whip-names.php">Whips & Lassos</a></li>
</ol>
</li>
<li><a href="http://www.fantasynamegenerators.com/web-series-names.php">Web Series</a></li>
<li><a href="http://www.fantasynamegenerators.com/wine-names.php">Wine Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/wrestler-names.php">Wrestler Names</a></li>
<li><a href="http://www.fantasynamegenerators.com/wrestling-move-names.php">Wrestling Move Names</a></li>
"""
|
d2emon/generator-pack
|
src/genesys/generator/fng/name/other/__init__.py
|
Python
|
gpl-3.0
| 19,197
|
[
"Galaxy"
] |
a863fc6bae069cd60dd18b53895d13632349ef1cee9f09a01f5d006d90bb029a
|
# Twisted Imports
from twisted.internet import reactor, defer, task
from twisted.python import log
from twisted.logger import Logger
# Octopus Imports
from octopus.sequence.util import Runnable, Pausable, Cancellable, BaseStep
from octopus.sequence.error import NotRunning, AlreadyRunning, NotPaused
from octopus.constants import State
from octopus.data.data import BaseVariable
from octopus.machine import Component
from octopus.events import EventEmitter
# Debugging
defer.Deferred.debug = True
def _subclasses (cls):
return cls.__subclasses__() + [
g for s in cls.__subclasses__()
for g in _subclasses(s)
]
def get_block_plugin_modules ():
# Add plugin machine blocks
# https://packaging.python.org/guides/creating-and-discovering-plugins/
import importlib
import pkgutil
import octopus.blocks
def iter_namespace(ns_pkg):
# Specifying the second argument (prefix) to iter_modules makes the
# returned name an absolute name instead of a relative one. This allows
# import_module to work without having to do additional modification to
# the name.
return pkgutil.walk_packages(ns_pkg.__path__, ns_pkg.__name__ + ".")
return {
name: importlib.import_module(name)
for finder, name, ispkg
in iter_namespace(octopus.blocks)
}
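# Illustrative note (hypothetical layout): with plugin modules installed
# under the octopus.blocks namespace package, e.g.
#   octopus/blocks/my_pump.py
#   octopus/blocks/my_valve.py
# get_block_plugin_modules() returns a dict keyed by absolute module name,
# e.g. {'octopus.blocks.my_pump': <module>, ...}, which
# get_block_plugin_block_names() below then filters for subclasses of a
# given base class.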
def get_block_plugin_block_names (check_subclass):
return [
name
for mod in get_block_plugin_modules().values()
for name, cls in mod.__dict__.items()
if isinstance(cls, type)
and issubclass(cls, check_subclass)
and cls is not check_subclass
]
def get_machine_js_definitions ():
from octopus.blocktopus.blocks.machines import machine_declaration
for block_cls in _subclasses(machine_declaration):
try:
yield (block_cls.__name__, block_cls.get_interface_definition())
except AttributeError:
pass
def get_connection_js_definitions ():
from octopus.blocktopus.blocks.machines import connection_declaration
for connection_cls in _subclasses(connection_declaration):
try:
yield (connection_cls.__name__, connection_cls.get_interface_definition())
except AttributeError:
pass
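def _demo_subclasses ():
    # Illustrative sketch only: _subclasses() collects indirect as well as
    # direct subclasses. The throwaway classes below exist purely to show
    # that behaviour.
    class A (object): pass
    class B (A): pass
    class C (B): pass
    assert set(_subclasses(A)) == {B, C}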
class Workspace (Runnable, Pausable, Cancellable, EventEmitter):
log = Logger()
def __init__ (self):
self.state = State.READY
self.allBlocks = {}
self.topBlocks = {}
self.variables = Variables()
def addBlock (self, id, type, fields = None, x = 0, y = 0):
from octopus.blocktopus.block_registry import get_block_class
self.log.debug(
"Add block to workspace. ID: {block_id} type: {block_type}, fields: {fields}",
block_id = id, block_type = type, fields = fields
)
try:
blockType = type
blockClass = get_block_class(blockType)
except KeyError:
self.log.warn("Unknown block type {block_type} requested", block_type = blockType)
raise Exception("Unknown Block: %s" % blockType)
block = blockClass(self, id)
block.position = [x, y]
try:
for field, value in fields.items():
block.fields[field] = value
except AttributeError:
self.log.warn("Block type {block_type} has no fields", block_type = blockType)
pass
block.created()
self.allBlocks[block.id] = block
self.topBlocks[block.id] = block
self.emit('top-block-added', block = block)
def getBlock (self, id):
try:
return self.allBlocks[id]
except KeyError:
print("Attempted to access unconnected block {:s}".format(str(id)))
raise
def removeBlock (self, id):
block = self.getBlock(id)
try:
del self.topBlocks[block.id]
except KeyError:
pass
# Disconnect prevBlock connection
prev = block.prevBlock
if prev is not None:
if prev.nextBlock == block:
prev.disconnectNextBlock(block)
else:
prevInputs = prev.inputs
for input in prevInputs.keys():
if prevInputs[input] is block:
prev.disconnectInput(input, "value")
# Disconnect nextBlock connection
next = block.nextBlock
if next is not None:
if next.prevBlock == block:
block.disconnectNextBlock(next)
# Disconnect output connection
output = block.outputBlock
if output is not None:
outputInputs = output.inputs
for input in outputInputs.keys():
if outputInputs[input] is block:
output.disconnectInput(input, "value")
try:
del self.allBlocks[block.id]
except KeyError:
pass
self.emit('top-block-removed', block = block)
block.disposed()
def connectBlock (self, id, parent, connection, input = None):
childBlock = self.getBlock(id)
parentBlock = self.getBlock(parent)
if id in self.topBlocks:
del self.topBlocks[id]
if connection == "input-value":
parentBlock.connectInput(input, childBlock, "value")
elif connection == "input-statement":
parentBlock.connectInput(input, childBlock, "statement")
elif connection == "previous":
parentBlock.connectNextBlock(childBlock)
self.emit('top-block-removed', block = childBlock)
def disconnectBlock (self, id, parent, connection, input = None):
childBlock = self.getBlock(id)
parentBlock = self.getBlock(parent)
self.topBlocks[id] = childBlock
if connection == "input-value":
parentBlock.disconnectInput(input, "value")
elif connection == "input-statement":
parentBlock.disconnectInput(input, "statement")
elif connection == "previous":
parentBlock.disconnectNextBlock(childBlock)
self.emit('top-block-added', block = childBlock)
#
# Controls
#
def _run (self):
self._complete = defer.Deferred()
dependencyGraph = []
runningBlocks = set()
externalStopBlocks = set()
resumeBlocks = []
self.emit("workspace-started")
def _runBlock (block):
if self.state is State.PAUSED:
self._onResume = _onResume
resumeBlocks.append(block)
return
if block.externalStop:
externalStopBlocks.add(block)
else:
runningBlocks.add(block)
# Run in the next tick so that dependency graph
# and runningBlocks are all updated before blocks
# are run (and potentially finish)
d = task.deferLater(reactor, 0, block.run)
d.addCallbacks(
callback = _blockComplete,
callbackArgs = [block],
errback = _blockError,
errbackArgs = [block]
)
d.addErrback(log.err)
def _onResume ():
for block in resumeBlocks:
_runBlock(block)
del resumeBlocks[:] # clear in place; rebinding the name here would make it local to _onResume
def _blockComplete (result, block):
if block.externalStop:
return
runningBlocks.discard(block)
decls = block.getGlobalDeclarationNames()
# Check if any other blocks can be run
toRun = []
for item in dependencyGraph:
for decl in decls:
item["deps"].discard(decl)
if len(item["deps"]) == 0:
toRun.append(item)
# _runBlock needs to be called in the next tick (done in _runBlock)
# so that the dependency graph is updated before any new blocks run.
for item in toRun:
dependencyGraph.remove(item)
item["block"].off("connectivity-change", item["onConnectivityChange"])
_runBlock(item["block"])
# Check if the experiment can be finished
reactor.callLater(0, _checkFinished)
def _blockError (failure, block):
if failure.type is Disconnected:
return _blockComplete(None, block)
# If any one step fails, cancel the rest.
if not _blockError.called:
self.log.error(
"Received error {error} from block {block.id}. Aborting.",
error = failure,
block = block,
)
def _errback (error):
# Pass the error if this is called as errback, or else
# the original failure if abort() had no errors.
# Call later to try to allow any other block-state events
# to propagate before the listeners are cancelled.
if not self._complete.called:
_externalStop()
self.state = State.ERROR
reactor.callLater(0, self._complete.errback, error or failure)
self.emit("workspace-stopped")
_blockError.called = True
try:
self.abort().addBoth(_errback)
except NotRunning:
pass
# Allow access to called within scope of _blockError
_blockError.called = False
def _updateDependencyGraph (data = None, block = None):
toRemove = []
for item in dependencyGraph:
if block is not None and item['block'] is not block:
continue
# If a block is no longer a top block, remove it
# from the dependency graph
if item['block'].prevBlock is not None:
toRemove.append(item)
continue
# Update dependency list
item['deps'] = set(item['block'].getUnmatchedVariableNames())
for item in toRemove:
item['block'].off('connectivity-change', item['onConnectivityChange'])
dependencyGraph.remove(item)
# When a new top block is added, add it to the list of blocks that must
# complete before the run can be finished; or to the list of blocks that
# must be stopped when the run finishes, if appropriate.
@self.on('top-block-added')
def onTopBlockAdded (data):
block = data['block']
if block._complete is not None and block._complete.called is False:
if block.externalStop:
externalStopBlocks.add(block)
else:
runningBlocks.add(block)
block._complete.addCallbacks(
callback = _blockComplete,
callbackArgs = [block],
errback = _blockError,
errbackArgs = [block]
).addErrback(log.err)
_updateDependencyGraph()
self.on('top-block-removed', _updateDependencyGraph)
# If there are no more running blocks, stop running.
def _checkFinished (error = None):
self.log.debug("Finished?: Waiting for {count} blocks", count = len(runningBlocks))
if len(runningBlocks) > 0:
return
self.log.warn("Skipped blocks:" + str(dependencyGraph))
if not (_blockError.called or self._complete.called):
_externalStop()
self.state = State.COMPLETE
self._complete.callback(None)
_removeListeners()
def _removeListeners ():
self.emit("workspace-stopped")
self.off('top-block-added', onTopBlockAdded)
self.off('top-block-removed', _updateDependencyGraph)
for item in dependencyGraph:
item['block'].off('connectivity-change', item['onConnectivityChange'])
# Cancel all blocks which must be stopped externally.
def _externalStop ():
for block in externalStopBlocks:
try:
block.cancel(propagate = True).addErrback(log.err)
except NotRunning:
pass
# Set up the dependency graph
allDeclaredGlobalVariables = set()
blocksToRunImmediately = []
dependencyError = False
# Create a list of all global variables defined in the workspace
for block in self.topBlocks.values():
allDeclaredGlobalVariables.update(block.getGlobalDeclarationNames())
def _generateOnConnectivityChange (block):
def onConnectivityChange (data):
_updateDependencyGraph(block = block)
return onConnectivityChange
# Defer blocks with dependencies until these have been met.
for block in self.topBlocks.values():
deps = set(block.getUnmatchedVariableNames())
# Check that all of these dependencies will be met.
for dep in deps:
if dep not in allDeclaredGlobalVariables:
self.emit(
"log-message",
level = "error",
message = "Referenced variable {:s} is never defined. ".format(dep),
block = block.id
)
dependencyError = True
if len(deps) == 0:
log.msg("Block %s has no deps, running now" % block.id)
blocksToRunImmediately.append(block)
else:
log.msg("Block %s waiting for %s" % (block.id, deps))
onConnectivityChange = _generateOnConnectivityChange(block)
block.on("connectivity-change", onConnectivityChange)
dependencyGraph.append({
"block": block,
"deps": deps,
"onConnectivityChange": onConnectivityChange
})
# If there are no blocks that have no dependencies, then
# there must be a circular dependency somewhere!
if len(blocksToRunImmediately) == 0:
self.emit(
"log-message",
level = "error",
message = "No blocks can run."
)
dependencyError = True
# Check for circular dependencies using a topological sorting algorithm
def findCircularDependencies (blocks, graph):
circularDeps = []
while len(blocks) > 0:
block = blocks.pop()
toRemove = []
for item in graph:
for decl in block["decls"]:
item["deps"].discard(decl)
if len(item["deps"]) == 0:
toRemove.append(item)
for item in toRemove:
graph.remove(item)
blocks.append(item)
# Remove any blocks that just depend on one of the
# circularly-dependent blocks
toRemove = []
for item in graph:
if len(item["decls"]) == 0:
toRemove.append(item)
for item in toRemove:
graph.remove(item)
return graph
circularDeps = findCircularDependencies(
blocks = [{
"block": block.id,
"position": block.position,
"decls": block.getGlobalDeclarationNames()
} for block in blocksToRunImmediately],
graph = [{
"block": item["block"].id,
"position": item["block"].position,
"deps": item["deps"].copy(),
"decls": item["block"].getGlobalDeclarationNames()
} for item in dependencyGraph]
)
if len(circularDeps) > 0:
self.emit(
"log-message",
level = "error",
message = "Circular dependencies detected:"
)
for item in sorted(
circularDeps, key = lambda item: item["position"]
):
self.emit(
"log-message",
level = "error",
message = "* {:s} depends on {:s}".format(
', '.join(item["decls"]),
', '.join(item["deps"])
),
block = item["block"]
)
dependencyError = True
# Do not run if there was an error with the dependencies.
if dependencyError:
self.state = State.COMPLETE
self._complete.errback(Exception("Dependency errors prevented start."))
_removeListeners()
# Run blocks with no dependencies in order of their position.
# Blocks are sorted first by x then by y.
else:
for block in sorted(
blocksToRunImmediately, key = lambda b: b.position
):
_runBlock(block)
return self._complete
def _reset (self):
results = []
for block in self.topBlocks.values():
try:
results.append(block.reset())
except AlreadyRunning:
pass
return defer.DeferredList(results)
def _pause (self):
results = []
for block in self.topBlocks.values():
try:
results.append(block.pause())
except NotRunning:
pass
self.emit("workspace-paused")
return defer.DeferredList(results)
def _resume (self):
results = []
for block in self.topBlocks.values():
try:
block.resume()
except NotPaused:
pass
self.emit("workspace-resumed")
return defer.DeferredList(results)
def _cancel (self, abort = False):
results = []
for block in self.topBlocks.values():
try:
block.cancel(abort)
except NotRunning:
pass
return defer.DeferredList(results)
#
# Serialisation
#
def toEvents (self):
events = []
for block in self.topBlocks.values():
events.extend(block.toEvents())
return events
def fromEvents (self, events):
for e in events:
if "block" in e['data']:
e['data']['id'] = e['data']['block']
event = Event.fromPayload(e['type'], e['data'])
event.apply(self)
class Variables (EventEmitter):
def __init__ (self):
self._variables = {}
self._handlers = {}
def add (self, name, variable):
if name in self._variables:
if self._variables[name] is variable:
return
self.remove(name)
self._variables[name] = variable
def _makeHandler (name):
def onChange (data):
self.emit('variable-changed', name = name, **data)
return onChange
if isinstance(variable, BaseVariable):
onChange = _makeHandler(name)
variable.on('change', onChange)
self._handlers[name] = onChange
self.emit('variable-added', name = name, variable = variable)
elif isinstance(variable, Component):
handlers = {}
for attrname, attr in variable.variables.items():
onChange = _makeHandler(attrname)
attr.on('change', onChange)
handlers[attrname] = onChange
self._variables[attrname] = attr
self.emit('variable-added', name = attrname, variable = variable)
self._handlers[name] = handlers
else:
self._handlers[name] = None
def remove (self, name):
try:
variable = self._variables[name]
except KeyError:
return
if isinstance(variable, BaseVariable):
variable.off(
'change',
self._handlers[name]
)
self.emit('variable-removed', name = name, variable = variable)
elif isinstance(variable, Component):
for attrname, attr in variable.variables.items():
attr.off(
'change',
self._handlers[name][attrname]
)
self.emit('variable-removed', name = attrname, variable = variable)
del self._variables[attrname]
del self._variables[name]
del self._handlers[name]
def rename (self, oldName, newName):
log.msg("Renaming variable: %s to %s" % (oldName, newName))
if oldName == newName:
return
try:
variable = self._variables[oldName]
except KeyError:
return
if isinstance(variable, Component):
oldNames = [name for name, var in variable.variables.items()]
else:
oldNames = [oldName]
variable.alias = newName
for name in oldNames:
variable = self._variables[name]
newName = variable.alias
self._variables[newName] = self._variables[name]
self._handlers[newName] = self._handlers[name]
del self._variables[name]
del self._handlers[name]
self.emit('variable-renamed',
oldName = name,
newName = newName,
variable = variable
)
def get (self, name):
try:
return self._variables[name]
except KeyError:
return None
__getitem__ = get
__setitem__ = add
__delitem__ = remove
def items (self):
return self._variables.items()
def values (self):
return self._variables.values()
def anyOfStackIs (block, states):
while block:
if block.state in states:
return True
block = block.nextBlock
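def _demo_anyOfStackIs ():
    # Illustrative sketch only: anyOfStackIs() walks a chain of blocks via
    # nextBlock and reports whether any block in the stack is in one of the
    # given states. The stub objects below are hypothetical stand-ins.
    class _Stub (object):
        def __init__ (self, state, nextBlock = None):
            self.state = state
            self.nextBlock = nextBlock
    tail = _Stub(State.RUNNING)
    head = _Stub(State.READY, tail)
    assert anyOfStackIs(head, (State.RUNNING,)) is True
    assert not anyOfStackIs(head, (State.ERROR,))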
class Block (BaseStep, EventEmitter):
# If this block needs to be stopped by the workspace
# (e.g. long-running disconnected controls)
# TODO: make this more general - this ought to be True
# for any block with an output connection which is started
# by eval() rather than run()
externalStop = False
# If this block returns an output, the output data type
# may be specified. Useful if the block does not return a
# value immediately.
outputType = None
@property
def state (self):
return self._state
@state.setter
def state (self, value):
self._state = value
self.workspace.emit("block-state", block = self.id, state = value.name)
@property
def disabled (self):
try:
return self._disabled
except AttributeError:
return False
@disabled.setter
def disabled (self, disabled):
self._disabled = bool(disabled)
try:
if disabled:
self.cancel(propagate = False)
else:
self.reset(propagate = False)
except (NotRunning, AlreadyRunning):
pass
self.emit("connectivity-changed")
def __init__ (self, workspace, id):
self.workspace = workspace
self.id = id
self.type = self.__class__.__name__
self.state = State.READY
self.nextBlock = None
self.prevBlock = None
self.outputBlock = None
self.parentInput = None
self._complete = None
self.fields = {}
self.inputs = {}
self.mutation = ""
self.comment = ""
#self._addedInputs = []
self.collapsed = False
self.disabled = False
self.position = [0, 0]
self.inputsInline = None
def created (self):
pass
def disposed (self):
pass
def emitLogMessage (self, message, level):
self.workspace.emit(
"log-message",
level = level,
message = message,
block = self.id
)
def connectNextBlock (self, childBlock):
if self.nextBlock is not None:
raise Exception("Block.connectNextBlock (#%s): parent #%s already has a next Block" % (childBlock.id, self.id))
if childBlock.prevBlock is not None:
raise Exception("Block.connectNextBlock (#%s): child #%s already has a previous Block" % (self.id, childBlock.id))
self.nextBlock = childBlock
childBlock.prevBlock = self
childBlock.parentInput = None
if self.state in (State.RUNNING, State.PAUSED):
try:
childBlock.reset()
except AlreadyRunning:
pass
else:
if anyOfStackIs(childBlock, [State.RUNNING, State.PAUSED]):
if childBlock._complete is not None:
self._complete = defer.Deferred()
childBlock._complete.addCallbacks(self._complete.callback, self._complete.errback)
elif self.state is State.READY:
try:
childBlock.reset()
except AlreadyRunning:
pass
@childBlock.on('connectivity-changed')
def onConnChange (data):
self.emit('connectivity-changed', **data)
@childBlock.on('value-changed')
def onValueChange (data):
self.emit('value-changed', **data)
@self.on('disconnected')
def onDisconnect (data):
if "next" in data and data['next'] is True:
childBlock.off('connectivity-changed', onConnChange)
childBlock.off('value-changed', onValueChange)
self.off('disconnected', onDisconnect)
self.emit('connectivity-changed')
def disconnectNextBlock (self, childBlock):
if self.nextBlock != childBlock:
raise Exception("Block.disconnectNextBlock: must pass the correct child block")
# Cancel parent block if waiting for data
try:
if not childBlock._complete.called:
childBlock._complete.errback(Disconnected())
childBlock._complete = defer.Deferred()
except AttributeError:
pass
self.nextBlock = None
childBlock.prevBlock = None
childBlock.parentInput = None
self.emit('disconnected', next = True)
self.emit('connectivity-changed')
def getSurroundParent (self):
block = self
while block is not None:
if block.outputBlock is not None:
block = block.outputBlock
continue
prev = block.prevBlock
if prev.nextBlock is block:
block = prev
else:
return prev
return None
def getChildren (self):
children = []
for block in self.inputs.values():
if block is not None:
children.append(block)
if self.nextBlock is not None:
children.append(self.nextBlock)
return children
def setFieldValue (self, fieldName, value):
oldValue = self.getFieldValue(fieldName)
self.fields[fieldName] = value
self.emit('value-changed',
block = self,
field = fieldName,
oldValue = oldValue,
newValue = value
)
def getFieldValue (self, fieldName, default = None):
try:
return self.fields[fieldName]
except KeyError:
return default
def getInput (self, inputName):
return self.inputs[inputName]
def getInputValue (self, inputName, default = False):
try:
input = self.inputs[inputName]
except KeyError:
input = None
if input is None:
return defer.succeed(default)
def error (failure):
failure.trap(Cancelled, Disconnected)
return default
return input.eval().addErrback(error)
def connectInput (self, inputName, childBlock, type):
if type == "value":
childBlock.outputBlock = self
elif type == "statement":
childBlock.prevBlock = self
else:
raise Exception("Block.connectInput: invalid type %s" % type)
self.inputs[inputName] = childBlock
childBlock.parentInput = inputName
if type == "value":
if self.state is State.READY:
try:
childBlock.reset()
except AlreadyRunning:
pass
elif self.state is State.RUNNING:
try:
childBlock.reset()
childBlock.run()
except AlreadyRunning:
pass
elif self.state is State.PAUSED:
if childBlock.state is State.PAUSED:
pass
elif childBlock.state is State.RUNNING:
childBlock.pause()
else:
# Should not raise AlreadyRunning because of the two checks above
childBlock.reset()
# Do not call run() because most input blocks will be eval()ed.
# Parent blocks expecting to run() children should run them
# again when they are resumed.
else:
try:
childBlock.cancel()
except NotRunning:
pass
@childBlock.on('connectivity-changed')
def onConnChange (data):
self.emit('connectivity-changed', **data)
@childBlock.on('value-changed')
def onValueChange (data):
self.emit('value-changed', **data)
@self.on('disconnected')
def onDisconnect (data):
if "input" in data and data['input'] == inputName:
childBlock.off('connectivity-changed', onConnChange)
childBlock.off('value-changed', onValueChange)
self.off('disconnected', onDisconnect)
self.emit('connectivity-changed')
self.workspace.emit('top-block-removed', block = childBlock)
def disconnectInput (self, inputName, type):
try:
childBlock = self.inputs[inputName]
except KeyError:
return
# Cancel parent block if waiting for data
try:
if not childBlock._complete.called:
childBlock._complete.errback(Disconnected())
childBlock._complete = defer.Deferred()
except AttributeError:
pass
if type == "value":
childBlock.outputBlock = None
elif type == "statement":
childBlock.prevBlock = None
else:
raise Exception("Block.disconnectInput: invalid type %s" % type)
self.inputs[inputName] = None
childBlock.parentInput = None
self.emit('disconnected', input = inputName)
self.emit('connectivity-changed')
self.workspace.emit('top-block-added', block = childBlock)
def getReferencedVariables (self, variables = None):
variables = variables or []
for block in self.getChildren():
variables.extend(block.getReferencedVariables())
return variables
def getReferencedVariableNames (self, variables = None):
variables = variables or []
for block in self.getChildren():
variables.extend(block.getReferencedVariableNames())
return variables
def getGlobalDeclarationNames (self, variables = None):
""" Returns a list of global variable names
that are declared within this block.
Note: This function must not return any local
variable names, because this will look like a
circular dependency.
"""
variables = variables or []
for block in self.getChildren():
variables.extend(block.getGlobalDeclarationNames())
return variables
def getUnmatchedVariableNames (self, variables = None):
""" Find variables that must be defined in a higher scope.
Returns a list of referenced variables that
are not defined within their scope (i.e. must be
defined globally)."""
variables = variables or []
for block in self.getChildren():
variables.extend(block.getUnmatchedVariableNames())
return variables
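# Illustrative note (hypothetical blocks): for a stack that declares a
# global variable `x` and references an undeclared variable `y`,
# getGlobalDeclarationNames() would report ['x'] and
# getUnmatchedVariableNames() would report ['y']; Workspace._run() builds
# its dependency graph from exactly these two lists.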
#
# Control
#
def run (self, parent = None):
# This block has been disabled or cancelled - skip it.
if self.disabled is True or self.state is State.CANCELLED:
if self.nextBlock is not None:
return self.nextBlock.run(parent)
else:
return defer.succeed(None)
# If this block is ready, then the entire stack must be ready.
if self.state is not State.READY:
raise AlreadyRunning
self.state = State.RUNNING
self.parent = parent
self._complete = defer.Deferred()
def _done (result = None):
""" Run the next block, chaining the callbacks """
if self.state is State.PAUSED:
self._onResume = _done
return
if self.state not in (State.CANCELLED, State.ERROR):
self.state = State.COMPLETE
if self.state is State.ERROR or self._complete is None:
# Don't continue execution if there has been an error
# (i.e. abort has been called)
pass
elif self.nextBlock is not None:
def _disconnected (failure):
f = failure.trap(Cancelled, Disconnected)
if f is Aborted:
raise f
try:
# Do not need to reset, should have been done on connect.
d = self.nextBlock.run()
except AlreadyRunning:
if self.nextBlock._complete is not None:
d = self.nextBlock._complete
else:
self._complete.callback(None)
return
d.addErrback(
_disconnected
).addCallbacks(
lambda result: self._complete.callback(result),
lambda failure: self._complete.errback(failure)
)
else:
self._complete.callback(None)
def _error (failure):
log.err("Block %s #%s Error: %s" % (self.type, self.id, failure))
self.state = State.ERROR
if self._complete is not None:
self._complete.errback(failure)
d = defer.maybeDeferred(self._run)
d.addCallbacks(_done, _error)
return self._complete
def eval (self):
return defer.succeed(None)
def pause (self):
if self.state is State.RUNNING:
self.state = State.PAUSED
results = [defer.maybeDeferred(self._pause)]
for block in self.getChildren():
try:
results.append(block.pause())
except NotRunning:
pass
return defer.DeferredList(results)
# Pass on pause call to next block.
elif self.nextBlock is not None:
return self.nextBlock.pause()
# Bottom of stack, nothing was running
else:
raise NotRunning
def resume (self):
if self.state is State.PAUSED:
self.state = State.RUNNING
results = [defer.maybeDeferred(self._resume)]
# Blocks can set a function to call when they are resumed
try:
onResume, self._onResume = self._onResume, None
onResume()
except (AttributeError, TypeError):
pass
# Resume all children
for block in self.getChildren():
try:
block.resume()
except NotPaused:
pass
return defer.DeferredList(results)
# Pass on resume call
elif self.nextBlock is not None:
return self.nextBlock.resume()
# Bottom of stack, nothing needed resuming
else:
raise NotPaused
def cancel (self, abort = False, propagate = False):
if self.state in (State.RUNNING, State.PAUSED):
if abort:
self.state = State.ERROR
propagate = True
else:
self.state = State.CANCELLED
self._onResume = None
# Send cancelled message to any parent block.
try:
if abort and self._complete.called is False:
self._complete.errback(Aborted())
self._complete = None
except AttributeError:
pass
results = []
# Propagate cancel call if required
if propagate:
try:
results.append(self.nextBlock.cancel(abort, propagate))
except (AttributeError, NotRunning):
pass
# Cancel the block execution
results.append(defer.maybeDeferred(self._cancel, abort))
# Cancel any inputs
# (cancel without propagate affects only one block + inputs.)
for block in self.inputs.values():
# Cancel all input children
try:
results.append(block.cancel(abort, propagate = True))
except (AttributeError, NotRunning):
pass
return defer.DeferredList(results)
# Pass on call to next block.
elif (abort or propagate) and self.nextBlock is not None:
if self.state is State.READY:
self.state = State.CANCELLED
return self.nextBlock.cancel(abort, propagate)
# Bottom of stack, nothing was running
# Or, this step is not running yet. Stop it from running.
elif self.state is State.READY:
self.state = State.CANCELLED
return defer.succeed(None)
# Nothing to do
else:
return defer.succeed(None)
def _cancel (self, abort = False):
pass
def reset (self, propagate = True):
# Entire stack must not be RUNNING or PAUSED
if (not propagate and self.state in (State.RUNNING, State.PAUSED)) \
or (propagate and anyOfStackIs(self, (State.RUNNING, State.PAUSED))):
raise AlreadyRunning
results = []
# Reset this block and inputs
if self.state is not State.READY:
self.state = State.READY
self._onResume = None
results.append(defer.maybeDeferred(self._reset))
for block in self.inputs.values():
try:
results.append(block.reset())
except AlreadyRunning:
# Something has gone wrong, as this block's state
# should reflect those of its (input) children.
# Try to cancel the child.
results.append(block.cancel(propagate = True).addCallback(lambda _: block.reset))
except AttributeError:
pass
# Reset next block if propagating
if propagate and self.nextBlock is not None:
results.append(self.nextBlock.reset())
return defer.DeferredList(results)
#
# Serialise
#
def toEvents (self):
events = []
events.append({ "type": "AddBlock", "data": { "id": self.id, "type": self.type, "fields": self.fields }})
if self.mutation != "":
events.append({ "type": "SetBlockMutation", "data": { "id": self.id, "mutation": self.mutation }})
if self.comment != "":
events.append({ "type": "SetBlockComment", "data": { "id": self.id, "value": self.comment }})
if self.outputBlock is not None:
events.append({ "type": "ConnectBlock", "data": { "id": self.id, "connection": "input-value", "parent": self.outputBlock.id, "input": self.parentInput }})
elif self.prevBlock is not None:
if self.parentInput is not None:
events.append({ "type": "ConnectBlock", "data": { "id": self.id, "connection": "input-statement", "parent": self.prevBlock.id, "input": self.parentInput }})
else:
events.append({ "type": "ConnectBlock", "data": { "id": self.id, "connection": "previous", "parent": self.prevBlock.id }})
for child in self.getChildren():
events.extend(child.toEvents())
if self.disabled:
events.append({ "type": "SetBlockDisabled", "data": { "id": self.id, "value": True }})
if self.inputsInline is False:
events.append({ "type": "SetBlockInputsInline", "data": { "id": self.id, "value": False }})
# Collapsed should come after children
if self.collapsed:
events.append({ "type": "SetBlockCollapsed", "data": { "id": self.id, "value": True }})
# Only move top blocks, and only once children have been added
if self.outputBlock is None and self.prevBlock is None:
events.append({ "type": "SetBlockPosition", "data": { "id": self.id, "x": self.position[0], "y": self.position[1] }})
return events
def _toHyphenated (name):
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
def _toUpperCamel (name):
# Capitalize the first letter of each hyphen-separated component
# (lower-casing the rest) and join the components together.
return "".join(map(str.capitalize, str(name).split('-')))
class Event (object):
"""
Events that can be applied to a workspace.
"""
jsProtocol = 'block'
@classmethod
def fromPayload (cls, action, payload):
try:
try:
return cls.types[action](**payload)
except AttributeError:
cls.types = { c.jsTopic: c for c in cls.__subclasses__() }
cls.types.update({ c.__name__: c for c in cls.__subclasses__() })
return cls.types[action](**payload)
except KeyError:
raise UnknownEventError(_toUpperCamel(action))
_fields = ()
def __init__ (self, **fields):
values = {}
for f in self._fields:
values[f] = fields[f] if f in fields else None
self.values = values
self.type = self.__class__.__name__
def valuesWithEventId (self, event_id):
values = self.values.copy()
values['event'] = event_id
return values
def apply (self, workspace):
pass
def toJSON (self):
import json
return json.dumps({
"type": self.type,
"data": self.values
})
class AddBlock (Event):
_fields = ("id", "type", "fields", "x", "y")
jsTopic = "created"
def apply (self, workspace):
workspace.addBlock(**self.values)
class RemoveBlock (Event):
_fields = ("id", )
jsTopic = "disposed"
def apply (self, workspace):
workspace.removeBlock(**self.values)
class ConnectBlock (Event):
_fields = ("id", "connection", "parent", "input")
jsTopic = "connected"
def apply (self, workspace):
workspace.connectBlock(**self.values)
class DisconnectBlock (Event):
_fields = ("id", "connection", "parent", "input")
jsTopic = "disconnected"
def apply (self, workspace):
workspace.disconnectBlock(**self.values)
class SetBlockPosition (Event):
_fields = ("id", "x", "y")
jsTopic = "set-position"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.position = [
int(self.values['x'] or 0),
int(self.values['y'] or 0)
]
class SetBlockFieldValue (Event):
_fields = ("id", "field", "value")
jsTopic = "set-field-value"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.setFieldValue(self.values['field'], self.values['value'])
class SetBlockDisabled (Event):
_fields = ("id", "value")
jsTopic = "set-disabled"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.disabled = self.values['value']
class SetBlockCollapsed (Event):
_fields = ("id", "value")
jsTopic = "set-collapsed"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.collapsed = bool(self.values['value'])
class SetBlockComment (Event):
_fields = ("id", "value")
jsTopic = "set-comment"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.comment = str(self.values['value'])
class SetBlockInputsInline (Event):
_fields = ("id", "value")
jsTopic = "set-inputs-inline"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.inputsInline = bool(self.values['value'])
class SetBlockMutation (Event):
_fields = ("id", "mutation")
jsTopic = "set-mutation"
def apply (self, workspace):
block = workspace.getBlock(self.values['id'])
block.mutation = self.values['mutation']
# Not Implemented:
# block-set-deletable (value)
# block-set-editable (value)
# block-set-movable (value)
# block-set-help-url (value)
# block-set-colour (value)
# Not Required
# block-add-input
# block-remove-input
# block-move-input
class UnknownEventError (Exception):
pass
class Disconnected (Exception):
pass
class Cancelled (Exception):
pass
class Aborted (Cancelled):
pass
# populate_blocks()
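def _example_workspace_roundtrip (workspace):
    # Hypothetical usage sketch (not called anywhere): serialise a
    # populated Workspace to its event list and replay it into a fresh
    # instance. `workspace` is assumed to be a fully constructed Workspace
    # with blocks already added.
    events = workspace.toEvents()
    clone = Workspace()
    clone.fromEvents(events)
    return clone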
|
richardingham/octopus
|
octopus/blocktopus/workspace.py
|
Python
|
mit
| 37,321
|
[
"Octopus"
] |
84041f8c004d410018c6985417f5c5001fce34de307dd93d366003d72744a866
|
"""
merged implementation of the cache provider
the name `cache` was deliberately not chosen, so that pluggy
automatically ignores the external pytest-cache plugin
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
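# Illustrative example (not part of the original module): a plugin keeping a
# database dump between sessions might use makedir() like this; the directory
# name and dump text are hypothetical.
#
#     dbdir = config.cache.makedir("myplugin-db")
#     dbdir.join("dump.sql").write(dump_text)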
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e.g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
)
return
try:
f = path.open('w')
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
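# Usage sketch (illustrative, not part of the original module): plugins reach
# the cache through config.cache; keys are "/"-separated (first segment is the
# plugin or application name) and values must be JSON-serialisable. The key and
# payload below are hypothetical.
#
#     last = config.cache.get("myplugin/lastrun", None)
#     config.cache.set("myplugin/lastrun", {"passed": 12, "failed": 1})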
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
items[:] = previously_failed + previously_passed
else:
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
prev_failed = config.cache.get("cache/lastfailed", None) is not None
if (session.testscollected and prev_failed) or self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
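# Example of the fixture above in a test module (hypothetical test and helper
# names; the cached value persists across sessions under .cache/v/):
#
#     def test_expensive(cache):
#         value = cache.get("example/value", None)
#         if value is None:
#             value = compute_expensive_value()   # hypothetical helper
#             cache.set("example/value", value)
#         assert value is not None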
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in basedir.join("d").visit():
#if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0
|
tgoodlet/pytest
|
_pytest/cacheprovider.py
|
Python
|
mit
| 8,938
|
[
"VisIt"
] |
b0504b7bd28d91e4629d35ecc492a768dba474d8314b9850a3611a8724259bdc
|
import numpy as np
from ase import Atom, Atoms
from gpaw import GPAW, Mixer
from gpaw.eigensolvers import RMM_DIIS
from gpaw.test import equal
a = 4.0
n = 20
d = 1.0
x = d / 3**0.5
atoms = Atoms([Atom('C', (0.0, 0.0, 0.0)),
Atom('H', (x, x, x)),
Atom('H', (-x, -x, x)),
Atom('H', (x, -x, -x)),
Atom('H', (-x, x, -x))],
cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=4, txt='a.txt',
mixer=Mixer(0.25, 3, 1))
atoms.set_calculator(calc)
e0 = atoms.get_potential_energy()
niter0 = calc.get_number_of_iterations()
es = RMM_DIIS(blocksize=3)
calc = GPAW(gpts=(n, n, n), nbands=4, txt='b.txt',
mixer=Mixer(0.25, 3, 1), eigensolver=es)
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
equal(e0, e1, 0.000001)
equal(niter0, niter1, 0)
|
qsnake/gpaw
|
gpaw/test/blocked_rmm_diis.py
|
Python
|
gpl-3.0
| 893
|
[
"ASE",
"GPAW"
] |
709602fa6be698d4954572391d0cd213c7a7e3cf7cbafc879fccbcef201a83e0
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 Eric Pascual
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
"""
An assortment of classes modeling specific features of the BrickPi.
"""
from .core import *
OUTPUT_A = 'ttyAMA0:outA'
OUTPUT_B = 'ttyAMA0:outB'
OUTPUT_C = 'ttyAMA0:outC'
OUTPUT_D = 'ttyAMA0:outD'
INPUT_1 = 'in1'
INPUT_2 = 'in2'
INPUT_3 = 'in3'
INPUT_4 = 'in4'
class Leds(object):
"""
The BrickPi LEDs.
"""
# ~autogen led-colors platforms.brickpi.led>currentClass
blue_led1 = Led(name_pattern='brickpi1:blue:ev3dev')
blue_led2 = Led(name_pattern='brickpi2:blue:ev3dev')
LED1 = ( blue_led1, )
LED2 = ( blue_led2, )
BLUE = ( 1, )
@staticmethod
def set_color(group, color, pct=1):
"""
Sets the brightness of the LEDs in the given group to the values specified in
the color tuple. When a percentage is specified, the brightness of each LED is
reduced proportionally.
Example::
Leds.set_color(Leds.LED1, Leds.BLUE)
"""
for l, v in zip(group, color):
l.brightness_pct = v * pct
@staticmethod
def set(group, **kwargs):
"""
Set attributes for each led in group.
Example::
Leds.set(Leds.LED1, brightness_pct=0.5, trigger='timer')
"""
for led in group:
for k in kwargs:
setattr(led, k, kwargs[k])
@staticmethod
def all_off():
"""
Turn all leds off
"""
Leds.blue_led1.brightness = 0
Leds.blue_led2.brightness = 0
# ~autogen
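# Usage sketch (illustrative, not part of the original module), using the
# group and colour constants defined above:
#
#     Leds.set_color(Leds.LED1, Leds.BLUE)            # full brightness
#     Leds.set_color(Leds.LED2, Leds.BLUE, pct=0.5)   # dimmed to 50 %
#     Leds.set(Leds.LED1, trigger='timer')
#     Leds.all_off()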
|
ensonic/ev3dev-lang-python-1
|
ev3dev/brickpi.py
|
Python
|
mit
| 2,738
|
[
"Amber"
] |
c802d3c17867e180f85fcf69259ee175b03dc98b385ecf87c84b6546126358fe
|
"""
Copyright (C) 2007-2010 Martin Laprise (mlaprise@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
This software is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from numpy import *
from scipy import *
from scipy import integrate
from core import *
# Planck constant [J*s]
h = 6.626069E-34
# Speed of light in vacuum [m/s]
c = 2.998E8
# boltzmann constant [J/K]
k = 1.3807E-23
# Ambiant temp. in Kelvin
T = 294.2
# Yb-doped Glass transition energy
epsYb = (h*c)/(10000*100)
class Amplifier():
'''
Class representing a generic Rare-Earth doped fiber amplifier
* fiber: Doped fiber
* pumpWL: List of pump wavelengths [wl1, wl2, ...]
* pumpPower: List of pump powers [pw1 forward, pw2 forward, ..., pw1 backward, pw2 backward, ...]
* signalWL: List of signal wavelengths [wl1, wl2, ...]
* signalPower: List of signal powers [pw1 forward, pw2 forward, ..., pw1 backward, pw2 backward, ...]
* nbrSections: Number of longitudinal sections
* aseRes: Resolution of the ASE spectrum
'''
def __init__(self, fiber, pumpWL, pumpPower, signalWL, signalPower, nbrSections = 100, aseRes = 100):
self.dopedFiber = fiber
self.nbrSignal = len(signalWL)
self.nbrPump = len(pumpWL)
self.nbrAse = aseRes
self.signalWL = signalWL
self.pumpWL = pumpWL
self.aseWL = linspace(self.dopedFiber.wlMin, self.dopedFiber.wlMax, aseRes)
self.aseDeltaLambda = self.aseWL[1] - self.aseWL[0]
self.delta_nu = abs(-c/(pow(self.aseWL*1E-6,2))*self.aseDeltaLambda*1e-6)
[self.sigma_em_p, self.sigma_abs_p] = fiber.crossSection(pumpWL)
[self.sigma_em_s, self.sigma_abs_s] = fiber.crossSection(signalWL)
[self.sigma_em_ase, self.sigma_abs_ase] = fiber.crossSection(self.aseWL)
self.alpha_s = fiber.bgLoss(signalWL)
self.alpha_p = fiber.bgLoss(pumpWL)
self.alpha_ase = fiber.bgLoss(self.aseWL)
self.nbrSections = nbrSections
self.z = linspace(0,fiber.length,nbrSections)
self.dz = self.dopedFiber.length / nbrSections
self.P_ase_f = zeros([self.nbrAse, nbrSections])
self.P_ase_b = zeros([self.nbrAse, nbrSections])
self.P_p_f = zeros([self.nbrPump, nbrSections])
self.P_p_b = zeros([self.nbrPump, nbrSections])
self.P_s_f = zeros([self.nbrSignal, nbrSections])
self.P_s_b = zeros([self.nbrSignal, nbrSections])
self.sigDC = zeros(self.nbrSignal)
self.N2 = zeros(nbrSections)
self.N1 = zeros(nbrSections)
# Initial Conditions
self.initNoise = 1E-6
self.P_p_f[:,0] = pumpPower[0:self.nbrPump]
self.P_p_b[:,-1] = pumpPower[self.nbrPump:2*self.nbrPump]
self.P_s_f[:,0] = signalPower[0:self.nbrSignal]
self.P_s_b[:,-1] = signalPower[self.nbrSignal:2*self.nbrSignal]
self.P_ase_f[:,0] = self.initNoise
self.P_ase_b[:,-1] = self.initNoise
self.error = 1.0
def __repr__(self):
return "Generic Rare-Earth doped fiber amplifier"
def set_init_pumpPower(self, pumpPower):
'''
Set the initial condition for the pumpPower for each pump
'''
self.P_p_f[:,0] = pumpPower
def get_pumpPower(self):
'''
Get the pump power
'''
return [self.P_p_f, self.P_p_b]
def get_signalPower(self):
'''
Get the signal power
'''
return [self.P_s_f, self.P_s_b]
def get_asePower(self):
'''
Get the ase power
'''
return [self.P_ase_f, self.P_ase_b]
def get_aseSpectrum(self, units='linear'):
'''
Return the ASE spectrum in both directions
'''
[ase_f, ase_b] = {
'linear': lambda: [self.P_ase_f[:,-1], self.P_ase_b[:,0]],
'dBm': lambda:[10*log10(self.P_ase_f[:,-1]), 10*log10(self.P_ase_b[:,0])],
}[units]()
return [ase_f, ase_b]
def set_init_signalPower(self, signalPower):
'''
Set the initial condition for the signalPower for each signal
'''
self.P_s_f[:,0] = signalPower
def set_init_asePower(self, asePower):
'''
Set the initial condition for the asePower for each ASE signal
'''
self.P_ase_f[:,0] = asePower
def invSptProfil(self):
'''
Compute the population inversion spatial profile
'''
N2 = zeros(self.nbrSections)
N1 = zeros(self.nbrSections)
pWL = self.pumpWL
sWL = self.signalWL
aseWL = self.aseWL
# Construct the transition rate factor 1->3
W13 = zeros(self.nbrSections)
for m in arange(self.nbrPump):
W13 += (self.sigma_abs_p[m] * (self.P_p_f[m,:]/(self.dopedFiber.width(pWL[m])*1E-12))) / (h*c/(pWL[m]*1E-6))
for v in arange(self.nbrPump):
W13 += (self.sigma_abs_p[v] * (self.P_p_b[v,:]/(self.dopedFiber.width(pWL[v])*1E-12))) / (h*c/(pWL[v]*1E-6))
# Construct the transition rate factor 2->1
W21 = 0.0
for l in arange(self.nbrSignal):
W21 += (self.sigma_em_s[l] * (self.P_s_f[l,:]/(self.dopedFiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W21 += (self.sigma_em_s[u] * (self.P_s_b[u,:]/(self.dopedFiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W21 += (self.sigma_em_ase[n] * (self.P_ase_f[n,:]/(self.dopedFiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W21 += (self.sigma_em_ase[v] * (self.P_ase_b[v,:]/(self.dopedFiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Construct the transition rate factor 1->2
W12 = 0.0
for l in arange(self.nbrSignal):
W12 += (self.sigma_abs_s[l] * (self.P_s_f[l,:]/(self.dopedFiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W12 += (self.sigma_abs_s[u] * (self.P_s_b[u,:]/(self.dopedFiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W12 += (self.sigma_abs_ase[n] * (self.P_ase_f[n,:]/(self.dopedFiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W12 += (self.sigma_abs_ase[v] * (self.P_ase_b[v,:]/(self.dopedFiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Compute the level population
N2 = self.dopedFiber.concDopant * ( (W13 + W12) / ((1/self.dopedFiber.tau) + W21 + W12 + W13) )
N1 = self.dopedFiber.concDopant - N2
self.N1 = N1
self.N2 = N2
def simulate(self, direction=1, backwardOutput=False):
def dPdz(w, z, sigma_abs_p, sigma_em_s, sigma_abs_s,
sigma_abs_ase, sigma_em_ase, Fiber, pWL, sWL, aseWL,
alpha_s, alpha_p, alpha_ase, delta_nu):
'''
RHS of the ODE system
'''
P_s_f = w[0:self.nbrSignal]
P_s_b = w[self.nbrSignal:2*self.nbrSignal]
P_p_f = w[2*self.nbrSignal:2*self.nbrSignal+self.nbrPump]
P_p_b = w[2*self.nbrSignal+self.nbrPump:2*self.nbrSignal+2*self.nbrPump]
P_ase_f = w[2*self.nbrSignal+2*self.nbrPump:2*self.nbrSignal+2*self.nbrPump+self.nbrAse]
P_ase_b = w[2*self.nbrSignal+2*self.nbrPump+self.nbrAse:2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse]
# Construct the transition rate factor 1->3
W13 = 0.0
for m in arange(self.nbrPump):
W13 += (sigma_abs_p[m] * (P_p_f[m]/(Fiber.width(pWL[m])*1E-12))) / (h*c/(pWL[m]*1E-6))
for v in arange(self.nbrPump):
W13 += (sigma_abs_p[v] * (P_p_b[v]/(Fiber.width(pWL[v])*1E-12))) / (h*c/(pWL[v]*1E-6))
# Construct the transition rate factor 2->1
W21 = 0.0
for l in arange(self.nbrSignal):
W21 += (sigma_em_s[l] * (P_s_f[l]/(Fiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W21 += (sigma_em_s[u] * (P_s_b[u]/(Fiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W21 += (sigma_em_ase[n] * (P_ase_f[n]/(Fiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W21 += (sigma_em_ase[v] * (P_ase_b[v]/(Fiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Construct the transition rate factor 1->2
W12 = 0.0
for l in arange(self.nbrSignal):
W12 += (sigma_abs_s[l] * (P_s_f[l]/(Fiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W12 += (sigma_abs_s[u] * (P_s_b[u]/(Fiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W12 += (sigma_abs_ase[n] * (P_ase_f[n]/(Fiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W12 += (sigma_abs_ase[v] * (P_ase_b[v]/(Fiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Compute the level population
N2 = Fiber.concDopant * ( (W13 + W12) / ((1/Fiber.tau) + W21 + W12 + W13) )
N1 = Fiber.concDopant - N2
P = zeros(2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse)
i = 0
# Signal Power
for l in arange(self.nbrSignal):
P[i] = sign(direction)*(sigma_em_s[l]*N2 - sigma_abs_s[l]*N1 - alpha_s) * P_s_f[l] * Fiber.modeOverlap(sWL[l], self.sigDC[l])
i += 1
for u in arange(self.nbrSignal):
P[i] = -sign(direction)*(sigma_em_s[u]*N2 - sigma_abs_s[u]*N1 - alpha_s) * P_s_b[u] * Fiber.modeOverlap(sWL[u], self.sigDC[u])
i += 1
# Pump Power
for m in arange(self.nbrPump):
P[i] = sign(direction)*(-sigma_abs_p[m]*N1 - alpha_p) * P_p_f[m] * Fiber.pumpOverlap(pWL[m])
i += 1
for v in arange(self.nbrPump):
P[i] = -sign(direction)*(-sigma_abs_p[v]*N1 - alpha_p) * P_p_b[v] * Fiber.pumpOverlap(pWL[v])
i += 1
# ASE Power
for n in arange(self.nbrAse):
P[i] = sign(direction)*(sigma_em_ase[n]*N2 - sigma_abs_ase[n]*N1 - alpha_ase) * P_ase_f[n] * Fiber.modeOverlap(aseWL[n])
P[i] += sign(direction)*2*(h*c/(aseWL[n]*1E-6)) * delta_nu[n] * sigma_em_ase[n]*N2 * Fiber.modeOverlap(aseWL[n])
i += 1
for v in arange(self.nbrAse):
P[i] = -sign(direction)*(sigma_em_ase[v]*N2 - sigma_abs_ase[v]*N1 - alpha_ase) * P_ase_b[v] * Fiber.modeOverlap(aseWL[v])
P[i] += -sign(direction)*2*(h*c/(aseWL[v]*1E-6)) * delta_nu[v] * sigma_em_ase[v]*N2 * Fiber.modeOverlap(aseWL[v])
i += 1
return P
arguments = (self.sigma_abs_p, self.sigma_em_s, self.sigma_abs_s,
self.sigma_abs_ase, self.sigma_em_ase, self.dopedFiber,
self.pumpWL, self.signalWL, self.aseWL,
self.alpha_s, self.alpha_p, self.alpha_ase, self.delta_nu)
# Set the initial conditions and solve the ODE system
if sign(direction) == 1:
w0 = r_[self.P_s_f[:,0],self.P_s_b[:,0],self.P_p_f[:,0],self.P_p_b[:,0],self.P_ase_f[:,0],self.P_ase_b[:,0]]
else:
w0 = r_[self.P_s_f[:,-1],self.P_s_b[:,-1],self.P_p_f[:,-1],self.P_p_b[:,-1],self.P_ase_f[:,-1],self.P_ase_b[:,-1]]
solution = integrate.odeint(dPdz, w0, self.z, args=arguments)
self.P_s_f = solution[:,0:self.nbrSignal].T
self.P_p_f = solution[:,2*self.nbrSignal:2*self.nbrSignal+self.nbrPump].T
self.P_ase_f = solution[:,2*self.nbrSignal+2*self.nbrPump:2*self.nbrSignal+2*self.nbrPump+self.nbrAse].T
if backwardOutput:
self.P_p_b = solution[:,2*self.nbrSignal+self.nbrPump:2*self.nbrSignal+2*self.nbrPump].T
self.P_s_b = solution[:,self.nbrSignal:2*self.nbrSignal].T
self.P_ase_b = solution[:,2*self.nbrSignal+2*self.nbrPump+self.nbrAse:2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse].T
# Use the chi2 between the backward ASE signals computed in two different iterations to evaluate the convergence
ase_b = solution[:,2*self.nbrSignal+2*self.nbrPump+self.nbrAse:2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse].T[:,-1]
self.error = chi2(self.P_p_b[:,-1], ase_b)
def simulateBackward(self, direction=1):
'''
Propagate the signal in the backward direction using the population
found in the previous forward iteration. Since N2 and N1 are constant,
each equation can be solved with a simple integration
'''
# Get the initial conditions
Pp_ini = self.P_p_b[:,-1]
Ps_ini = self.P_s_b[:,-1]
Pase_ini = self.P_ase_b[:,-1]
self.invSptProfil()
for m in arange(self.nbrPump):
integrant = sign(direction)*(-self.sigma_abs_p[m]*self.N1[::-1] - self.alpha_p) * self.dopedFiber.pumpOverlap(self.pumpWL[m])
self.P_p_b[m,::-1] = r_[Pp_ini[m], Pp_ini[m]*exp(integrate.cumtrapz(integrant, self.z))]
for l in arange(self.nbrSignal):
integrant = sign(direction)*(self.sigma_em_s[l]*self.N2[::-1] - self.sigma_abs_s[l]*self.N1[::-1] - self.alpha_s)
integrant *= self.dopedFiber.modeOverlap(self.signalWL[l], self.sigDC[l])
self.P_s_b[l,::-1] = r_[Ps_ini[l], Ps_ini[l]*exp(integrate.cumtrapz(integrant, self.z))]
for v in arange(self.nbrAse):
integrant = sign(direction)*(self.sigma_em_ase[v]*self.N2[::-1] - self.sigma_abs_ase[v]*self.N1[::-1] - self.alpha_ase)
integrant *= self.dopedFiber.modeOverlap(self.aseWL[v])
integrant2 = sign(direction)*2*(h*c/(self.aseWL[v]*1E-6)) * self.delta_nu[v] * self.sigma_em_ase[v]*self.N2[::-1]
integrant2 *= self.dopedFiber.modeOverlap(self.aseWL[v])
sol = integrate.cumtrapz(integrant, self.z)
solTerme1 = exp(sol)
solTerme1b = r_[1.0, exp(-sol)]
solTerme2 = solTerme1 * integrate.cumtrapz(integrant2*solTerme1b, self.z)
self.P_ase_b[v,::-1] = r_[Pase_ini[v], Pase_ini[v]*solTerme1 + solTerme2]
def run(self, errorTol, nbrItrMax, errorOutput = False, verbose=False):
error = zeros(nbrItrMax)
self.simulate()
for i in arange(nbrItrMax):
self.simulateBackward()
self.simulate()
error[i] = self.error
if verbose:
print(error[i])
return error
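# Usage sketch (illustrative; the doped-fiber object comes from PyOFTK's core
# module and its constructor is assumed here). Note the conventions used by
# __init__ above: one wavelength per pump/signal channel, and power lists
# holding the forward entries followed by the backward entries.
#
#     amp = Amplifier(fiber, pumpWL=[0.976], pumpPower=[0.5, 0.0],
#                     signalWL=[1.064], signalPower=[1e-3, 0.0])
#     errors = amp.run(errorTol=1e-4, nbrItrMax=10)
#     P_s_f, P_s_b = amp.get_signalPower()
#     ase_f, ase_b = amp.get_aseSpectrum('dBm')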
class erbiumAmplifierSimple():
'''
Class representing a very simple erbium amplifier: one forward pump and one forward signal
'''
def __init__(self, fiber, pumpPower, signalPower, nbrSections = 100):
# Physical properties of the amplifier
self.dopedFiber = fiber
self.signalWL = 1.55
self.pumpWL = 0.980
self.aseWL = 1.50
self.sigma_abs_p = 2.7E-25
self.sigma_em_p = 0.0
self.sigma_em_s = 2.52E-25
self.sigma_abs_s = 1.98E-25
[self.sigma_em_ase, self.sigma_abs_ase] = fiber.crossSection(self.aseWL)
self.alpha_s = 0.0
self.alpha_p = 0.0
self.alpha_ase = 0.0
self.delta_nu = 125E-9
self.nbrSections = nbrSections
self.z = linspace(0,fiber.length,nbrSections)
self.dz = self.dopedFiber.length / nbrSections
self.P_p_in = zeros(nbrSections)
self.P_s_in = zeros(nbrSections)
self.P_ase_in = zeros(nbrSections)
self.P_p_out = zeros(nbrSections)
self.P_s_out = zeros(nbrSections)
self.P_ase_out = zeros(nbrSections)
# Initial Conditions
self.P_p_out[0] = pumpPower
self.P_s_out[0] = signalPower
self.P_ase_out[0] = 0.0
def __repr__(self):
return "Very simple erbium-doped fiber amplifier"
def set_init_pumpPower(self, pumpPower):
self.P_p_out[0] = pumpPower
def set_init_signalPower(self, signalPower):
self.P_s_out[0] = signalPower
def set_init_asePower(self, asePower):
self.P_ase_out[0] = asePower
def inversion(self, P_p, P_s, P_ase):
'''
Compute the population inversion with input Power
'''
W13 = (self.sigma_abs_p * (P_p/(self.dopedFiber.width(self.pumpWL)*1E-12))) / (h*c/(self.pumpWL*1E-6))
W21 = (self.sigma_em_s * (P_s/(self.dopedFiber.width(self.signalWL)*1E-12))) / (h*c/(self.signalWL*1E-6))
W21 += (self.sigma_em_ase * (P_ase/(self.dopedFiber.width(self.aseWL)*1E-12))) / (h*c/(self.aseWL*1E-6))
W12 = (self.sigma_abs_s * (P_s/(self.dopedFiber.width(self.signalWL)*1E-12))) / (h*c/(self.signalWL*1E-6))
W12 += (self.sigma_abs_ase * (P_ase/(self.dopedFiber.width(self.aseWL)*1E-12))) / (h*c/(self.aseWL*1E-6))
N2 = self.dopedFiber.concDopant * ( (W13 + W12) / ((1/self.dopedFiber.tau) + W21 + W12 + W13) )
N1 = self.dopedFiber.concDopant - N2
return [N1,N2]
def computeSection(self, s):
[N1,N2] = self.inversion(self.P_p_out[s-1], self.P_s_out[s-1], self.P_ase_out[s-1])
w0 = array([self.P_p_out[s-1], self.P_s_out[s-1], self.P_ase_out[s-1]])
self.P_s_out[s] = self.P_s_out[s-1] + (self.sigma_em_s*N2 - self.sigma_abs_s*N1 - self.alpha_s) * self.P_s_out[s-1] * self.dopedFiber.modeOverlap(self.signalWL) * self.dz
self.P_p_out[s] = self.P_p_out[s-1] + (-self.sigma_abs_p*N1 - self.alpha_p) * self.P_p_out[s-1] * self.dopedFiber.modeOverlap(self.pumpWL) * self.dz
self.P_ase_out[s] = self.P_ase_out[s-1] + (self.sigma_em_ase*N2 - self.sigma_abs_ase*N1 - self.alpha_ase) * self.P_ase_out[s-1] * self.dopedFiber.modeOverlap(self.aseWL) * self.dz
self.P_ase_out[s] += 2*(h*c/(self.aseWL*1E-6)) * self.delta_nu * self.sigma_em_ase*N2 * self.dopedFiber.modeOverlap(self.aseWL) * self.dz
def simulateBySections(self):
'''
Compute all sections
'''
for i in arange(1,self.nbrSections):
self.computeSection(i)
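# Note (added comment): computeSection()/simulateBySections() march the three
# coupled power equations with one explicit Euler step of size dz per
# longitudinal section, whereas simulate() below integrates the same
# right-hand side with scipy.integrate.odeint.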
def simulate(self):
def diffEquations(w, z, sigma_abs_p, sigma_em_s, sigma_abs_s,
sigma_abs_ase, sigma_em_ase, Fiber, pWL, sWL, aseWL,
alpha_s, alpha_p, alpha_ase, delta_nu):
P_s, P_p, P_ase = w
W13 = (sigma_abs_p * (P_p/(Fiber.width(pWL)*1E-12))) / (h*c/(pWL*1E-6))
W21 = (sigma_em_s * (P_s/(Fiber.width(sWL)*1E-12))) / (h*c/(sWL*1E-6))
W21 += (sigma_em_ase * (P_ase/(Fiber.width(aseWL)*1E-12))) / (h*c/(aseWL*1E-6))
W12 = (sigma_abs_s * (P_s/(Fiber.width(sWL)*1E-12))) / (h*c/(sWL*1E-6))
W12 += (sigma_abs_ase * (P_ase/(Fiber.width(aseWL)*1E-12))) / (h*c/(aseWL*1E-6))
N2 = Fiber.concDopant * ( (W13 + W12) / ((1/Fiber.tau) + W21 + W12 + W13) )
N1 = Fiber.concDopant - N2
Ps = (sigma_em_s*N2 - sigma_abs_s*N1 - alpha_s) * P_s * Fiber.modeOverlap(sWL)
Pp = (-sigma_abs_p*N1 - alpha_p) * P_p * Fiber.modeOverlap(pWL)
Pase = (sigma_em_ase*N2 - sigma_abs_ase*N1 - alpha_ase) * P_ase * Fiber.modeOverlap(aseWL)
Pase += 2*(h*c/(aseWL*1E-6)) * delta_nu * sigma_em_ase*N2 * Fiber.modeOverlap(aseWL)
return [Ps, Pp, Pase]
w0 = array([self.P_s_out[0], self.P_p_out[0], self.P_ase_out[0]])
arguments = (self.sigma_abs_p, self.sigma_em_s, self.sigma_abs_s,
self.sigma_abs_ase, self.sigma_em_ase, self.dopedFiber,
self.pumpWL, self.signalWL, self.aseWL,
self.alpha_s, self.alpha_p, self.alpha_ase, self.delta_nu)
solution = integrate.odeint(diffEquations, w0, self.z, args=arguments)
self.P_s_out = solution[:,0]
self.P_p_out = solution[:,1]
self.P_ase_out = solution[:,2]
|
mlaprise/PyOFTK
|
PyOFTK/amplifier.py
|
Python
|
gpl-2.0
| 18,207
|
[
"ASE"
] |
452350b10b98cd499878422443b947c1b6c46a3acf23540d289aaeadbfff9748
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
VGG_A Benchmark
https://github.com/soumith/convnet-benchmarks
./vgg_a.py
./vgg_a.py -d f16
"""
from neon import NervanaObject
from neon.util.argparser import NeonArgparser
from neon.initializers import Gaussian
from neon.layers import Conv, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
from neon.models import Model
from neon.data import ArrayIterator
import numpy as np
parser = NeonArgparser(__doc__)
args = parser.parse_args()
NervanaObject.be.bsz = 64
NervanaObject.be.enable_winograd = 4
# setup data provider
X_train = np.random.uniform(-1, 1, (64, 3*224*224))
y_train = np.random.uniform(-1, 1, (64, 1000))
train = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))
layers = [Conv((3, 3, 64), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 128), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),
Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),
Affine(nout=1000, init=Gaussian(scale=0.01), activation=Softmax())]
model = Model(layers=layers)
weight_sched = Schedule([22, 44, 65], (1/250.)**(1/3.))
opt_gdm = GradientDescentMomentum(0.01, 0.0, wdecay=0.0005, schedule=weight_sched)
opt = MultiOptimizer({'default': opt_gdm})
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.benchmark(train, cost=cost, optimizer=opt, niterations=10, nskip=1)
|
dongjoon-hyun/neon
|
examples/convnet-benchmarks/vgg_a.py
|
Python
|
apache-2.0
| 3,040
|
[
"Gaussian"
] |
f0637605b8234d29b393431ee49a120b8a838a5c8568999fc099c294a6e74128
|
from collections import OrderedDict
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = OrderedDict(
(
("{{first_name_female}} {{last_name}}", 0.97),
("{{prefix_female}} {{first_name_female}} {{last_name}}", 0.015),
("{{first_name_female}} {{last_name}} {{suffix_female}}", 0.02),
(
"{{prefix_female}} {{first_name_female}} {{last_name}} {{suffix_female}}",
0.005,
),
)
)
formats_nonbinary = OrderedDict(
(
("{{first_name_nonbinary}} {{last_name}}", 0.97),
("{{prefix_nonbinary}} {{first_name_nonbinary}} {{last_name}}", 0.015),
("{{first_name_nonbinary}} {{last_name}} {{suffix_nonbinary}}", 0.02),
(
"{{prefix_nonbinary}} {{first_name_nonbinary}} {{last_name}} {{suffix_nonbinary}}",
0.005,
),
)
)
formats_male = OrderedDict(
(
("{{first_name_male}} {{last_name}}", 0.97),
("{{prefix_male}} {{first_name_male}} {{last_name}}", 0.015),
("{{first_name_male}} {{last_name}} {{suffix_male}}", 0.02),
(
"{{prefix_male}} {{first_name_male}} {{last_name}} {{suffix_male}}",
0.005,
),
)
)
# Using random_element's dictionary weighting means that the
# formats = formats_male + formats_female
has to be replaced with something dict-based and Python 2.x compatible
formats = formats_male.copy()
formats.update(formats_female)
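# Illustrative note (not part of the original provider): these OrderedDicts are
# consumed by Faker's random_element(), which treats the values as selection
# weights when a mapping is passed; a hypothetical sketch of how a formatter
# would use them:
#
#     fmt = self.random_element(self.formats_female)   # weighted choice
#     name = self.generator.parse(fmt)                 # expand "{{...}}" tokens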
# Top 200 names of the decade from the 60's-90's from:
# https://www.ssa.gov/OACT/babynames/decades/names1960s.html
# Weightings derived from total number on each name
first_names_female = OrderedDict(
(
("April", 0.004529083),
("Abigail", 0.002043839),
("Adriana", 0.000488767),
("Adrienne", 0.000622931),
("Aimee", 0.000424727),
("Alejandra", 0.000415754),
("Alexa", 0.000663005),
("Alexandra", 0.002835711),
("Alexandria", 0.000964993),
("Alexis", 0.003446735),
("Alice", 0.000589904),
("Alicia", 0.003766845),
("Alisha", 0.000475942),
("Alison", 0.001506047),
("Allison", 0.003740866),
("Alyssa", 0.00324341),
("Amanda", 0.015360768),
("Amber", 0.006928794),
("Amy", 0.012860314),
("Ana", 0.000853679),
("Andrea", 0.006747028),
("Angel", 0.001161117),
("Angela", 0.011954085),
("Angelica", 0.001102746),
("Angie", 0.00030166),
("Anita", 0.001383767),
("Ann", 0.002627483),
("Anna", 0.004691502),
("Anne", 0.002089582),
("Annette", 0.001487399),
("Ariana", 0.000412668),
("Ariel", 0.000615774),
("Ashlee", 0.000696534),
("Ashley", 0.014773009),
("Audrey", 0.001139165),
("Autumn", 0.000918594),
("Bailey", 0.000691916),
("Barbara", 0.004839169),
("Becky", 0.000960944),
("Belinda", 0.000502227),
("Beth", 0.002246113),
("Bethany", 0.001249385),
("Betty", 0.000840241),
("Beverly", 0.000990272),
("Bianca", 0.000624835),
("Bonnie", 0.001351901),
("Brandi", 0.002077216),
("Brandy", 0.002177499),
("Breanna", 0.000876003),
("Brenda", 0.005737124),
("Briana", 0.00093665),
("Brianna", 0.002543549),
("Bridget", 0.000787232),
("Brittany", 0.007258404),
("Brittney", 0.001566147),
("Brooke", 0.002410152),
("Caitlin", 0.001808319),
("Caitlyn", 0.000481194),
("Candace", 0.000550662),
("Candice", 0.000653199),
("Carla", 0.00195185),
("Carly", 0.000498725),
("Carmen", 0.000891783),
("Carol", 0.002972719),
("Caroline", 0.001198127),
("Carolyn", 0.002647225),
("Carrie", 0.002934659),
("Casey", 0.001177707),
("Cassandra", 0.002501243),
("Cassidy", 0.000452129),
("Cassie", 0.000344886),
("Catherine", 0.004460622),
("Cathy", 0.001413248),
("Charlene", 0.000538865),
("Charlotte", 0.000530417),
("Chelsea", 0.00280043),
("Chelsey", 0.000368501),
("Cheryl", 0.004166447),
("Cheyenne", 0.000696907),
("Chloe", 0.000565807),
("Christie", 0.000397873),
("Christina", 0.008735669),
("Christine", 0.007488758),
("Christy", 0.00141861),
("Cindy", 0.003360109),
("Claire", 0.000553835),
("Claudia", 0.00096055),
("Colleen", 0.001836203),
("Connie", 0.001821845),
("Courtney", 0.00484939),
("Cristina", 0.000328734),
("Crystal", 0.006365045),
("Cynthia", 0.007655379),
("Daisy", 0.000437443),
("Dana", 0.003395805),
("Danielle", 0.006671783),
("Darlene", 0.000952737),
("Dawn", 0.005014983),
("Deanna", 0.002049026),
("Debbie", 0.001842922),
("Deborah", 0.005386088),
("Debra", 0.004123572),
("Denise", 0.004592291),
("Desiree", 0.000991497),
("Destiny", 0.001055515),
("Diamond", 0.000331732),
("Diana", 0.003699348),
("Diane", 0.003058996),
("Dominique", 0.000847857),
("Donna", 0.00570819),
("Doris", 0.000398026),
("Dorothy", 0.000722426),
("Ebony", 0.000399624),
("Eileen", 0.000544271),
("Elaine", 0.000601175),
("Elizabeth", 0.014954075),
("Ellen", 0.000747267),
("Emily", 0.009100581),
("Emma", 0.001272059),
("Erica", 0.004344471),
("Erika", 0.002105537),
("Erin", 0.005450719),
("Evelyn", 0.000825095),
("Faith", 0.000427113),
("Felicia", 0.001717294),
("Frances", 0.000546897),
("Gabriela", 0.000526937),
("Gabriella", 0.00044123),
("Gabrielle", 0.001090096),
("Gail", 0.00071934),
("Gina", 0.002841095),
("Glenda", 0.000384982),
("Gloria", 0.001155623),
("Grace", 0.00087202),
("Gwendolyn", 0.000407831),
("Hailey", 0.000662917),
("Haley", 0.001557939),
("Hannah", 0.004189822),
("Hayley", 0.000478305),
("Heather", 0.010945254),
("Heidi", 0.002239941),
("Helen", 0.000636675),
("Holly", 0.003487028),
("Isabel", 0.000352305),
("Isabella", 0.000410282),
("Jackie", 0.000566748),
("Jaclyn", 0.00047708),
("Jacqueline", 0.004811242),
("Jade", 0.000446264),
("Jaime", 0.000853175),
("Jamie", 0.005067663),
("Jane", 0.0009486),
("Janet", 0.002489993),
("Janice", 0.001593308),
("Jasmin", 0.000333374),
("Jasmine", 0.003025422),
("Jean", 0.000815969),
("Jeanette", 0.000767293),
("Jeanne", 0.000515381),
("Jenna", 0.001804052),
("Jennifer", 0.029218839),
("Jenny", 0.000932667),
("Jessica", 0.020047608),
("Jill", 0.003253018),
("Jillian", 0.000988587),
("Jo", 0.000442083),
("Joan", 0.000802793),
("Joann", 0.000544336),
("Joanna", 0.001176284),
("Joanne", 0.000729824),
("Jocelyn", 0.000456878),
("Jodi", 0.001252405),
("Jody", 0.000741861),
("Jordan", 0.001653057),
("Joy", 0.000916515),
("Joyce", 0.001009488),
("Judith", 0.000870706),
("Judy", 0.001101586),
("Julia", 0.003301891),
("Julie", 0.008211731),
("Kaitlin", 0.000674473),
("Kaitlyn", 0.001478623),
("Kara", 0.001549119),
("Karen", 0.009643845),
("Kari", 0.000794323),
("Karina", 0.000494764),
("Karla", 0.000387696),
("Katelyn", 0.001476128),
("Katherine", 0.006581479),
("Kathleen", 0.00503549),
("Kathryn", 0.004177806),
("Kathy", 0.002710214),
("Katie", 0.003056216),
("Katrina", 0.001565446),
("Kayla", 0.004621465),
("Kaylee", 0.000551734),
("Kelli", 0.000932163),
("Kellie", 0.000299187),
("Kelly", 0.009342929),
("Kelsey", 0.002470383),
("Kendra", 0.001401079),
("Kerri", 0.000316215),
("Kerry", 0.000352984),
("Kiara", 0.000390037),
("Kim", 0.002518642),
("Kimberly", 0.015594077),
("Kirsten", 0.000369486),
("Krista", 0.001266872),
("Kristen", 0.004345587),
("Kristi", 0.001022926),
("Kristie", 0.000380189),
("Kristin", 0.003613728),
("Kristina", 0.002316281),
("Kristine", 0.000977709),
("Kristy", 0.001097734),
("Krystal", 0.001238113),
("Kylie", 0.00049739),
("Lacey", 0.00045469),
("Latasha", 0.00032904),
("Latoya", 0.000646371),
("Laura", 0.010815096),
("Lauren", 0.007015421),
("Laurie", 0.002200786),
("Leah", 0.001997571),
("Leslie", 0.003606134),
("Linda", 0.006437751),
("Lindsay", 0.002185466),
("Lindsey", 0.002646153),
("Lisa", 0.01872729),
("Loretta", 0.000482945),
("Lori", 0.006040316),
("Lorraine", 0.000486753),
("Lydia", 0.000370274),
("Lynn", 0.001522308),
("Mackenzie", 0.000761056),
("Madeline", 0.000808921),
("Madison", 0.002011184),
("Makayla", 0.000439391),
("Mallory", 0.000688633),
("Mandy", 0.000355566),
("Marcia", 0.000403213),
("Margaret", 0.003839968),
("Maria", 0.006593123),
("Mariah", 0.00097598),
("Marie", 0.001520229),
("Marilyn", 0.000590889),
("Marisa", 0.000339983),
("Marissa", 0.001582627),
("Martha", 0.001290028),
("Mary", 0.014288466),
("Maureen", 0.000753855),
("Mckenzie", 0.000334512),
("Meagan", 0.000729999),
("Megan", 0.007686786),
("Meghan", 0.001481578),
("Melanie", 0.003400117),
("Melinda", 0.002078113),
("Melissa", 0.014890692),
("Melody", 0.000404264),
("Mercedes", 0.000334643),
("Meredith", 0.000766987),
("Mia", 0.000319935),
("Michaela", 0.000506998),
("Michele", 0.003519551),
("Michelle", 0.01527423),
("Mikayla", 0.000410195),
("Mindy", 0.000306891),
("Miranda", 0.001421193),
("Misty", 0.001564614),
("Molly", 0.001710641),
("Monica", 0.004324095),
("Monique", 0.001272125),
("Morgan", 0.002527025),
("Nancy", 0.005023343),
("Natalie", 0.003658398),
("Natasha", 0.001739815),
("Nichole", 0.001001237),
("Nicole", 0.011156655),
("Nina", 0.000298115),
("Norma", 0.000470754),
("Olivia", 0.001967609),
("Paige", 0.001106313),
("Pam", 0.000374454),
("Pamela", 0.005816222),
("Patricia", 0.008349353),
("Patty", 0.000383493),
("Paula", 0.002478284),
("Peggy", 0.000810606),
("Penny", 0.000836564),
("Phyllis", 0.000562437),
("Priscilla", 0.000350226),
("Rachael", 0.001098128),
("Rachel", 0.00876108),
("Raven", 0.000404855),
("Rebecca", 0.010563161),
("Rebekah", 0.000858581),
("Regina", 0.001941739),
("Renee", 0.00257883),
("Rhonda", 0.002879221),
("Rita", 0.000719187),
("Roberta", 0.000461715),
("Robin", 0.00409199),
("Robyn", 0.00032138),
("Rose", 0.000697125),
("Ruth", 0.001041946),
("Sabrina", 0.001920969),
("Sally", 0.000532912),
("Samantha", 0.008186124),
("Sandra", 0.006473426),
("Sandy", 0.000497106),
("Sara", 0.005619879),
("Sarah", 0.014434273),
("Savannah", 0.000978344),
("Selena", 0.000329106),
("Shannon", 0.005952552),
("Shari", 0.000449043),
("Sharon", 0.004796469),
("Shawna", 0.000354209),
("Sheena", 0.000355763),
("Sheila", 0.00220129),
("Shelby", 0.001575601),
("Shelia", 0.000403673),
("Shelley", 0.000922227),
("Shelly", 0.001339469),
("Sheri", 0.000913166),
("Sherri", 0.001285038),
("Sherry", 0.002445235),
("Sheryl", 0.00057025),
("Shirley", 0.000833259),
("Sierra", 0.000954816),
("Sonia", 0.000332739),
("Sonya", 0.000914085),
("Sophia", 0.000535976),
("Stacey", 0.002836761),
("Stacie", 0.0003903),
("Stacy", 0.00311717),
("Stefanie", 0.00034644),
("Stephanie", 0.013595762),
("Sue", 0.000472877),
("Summer", 0.000411508),
("Susan", 0.0088973),
("Suzanne", 0.001943577),
("Sydney", 0.001220101),
("Sylvia", 0.000625798),
("Tabitha", 0.000428404),
("Tamara", 0.00212948),
("Tami", 0.000403651),
("Tammie", 0.00042337),
("Tammy", 0.006493584),
("Tanya", 0.002039024),
("Tara", 0.00316834),
("Tasha", 0.000355807),
("Taylor", 0.003996871),
("Teresa", 0.005060003),
("Terri", 0.001823903),
("Terry", 0.00060494),
("Theresa", 0.003492762),
("Tiffany", 0.006594283),
("Tina", 0.005186419),
("Toni", 0.000891695),
("Tonya", 0.002404133),
("Tracey", 0.001511146),
("Traci", 0.00086193),
("Tracie", 0.000301901),
("Tracy", 0.00498572),
("Tricia", 0.000449196),
("Valerie", 0.003218022),
("Vanessa", 0.003779189),
("Veronica", 0.003017805),
("Vicki", 0.00088653),
("Vickie", 0.000695199),
("Victoria", 0.005237677),
("Virginia", 0.001496482),
("Wanda", 0.001336186),
("Wendy", 0.004058263),
("Whitney", 0.001690768),
("Yesenia", 0.000331951),
("Yolanda", 0.001213819),
("Yvette", 0.000483427),
("Yvonne", 0.001005483),
("Zoe", 0.000367407),
)
)
first_names_male = OrderedDict(
(
("Aaron", 0.006741589),
("Adam", 0.007124922),
("Adrian", 0.001521889),
("Alan", 0.002344657),
("Albert", 0.001316595),
("Alec", 0.000442958),
("Alejandro", 0.000862489),
("Alex", 0.002111833),
("Alexander", 0.005215733),
("Alexis", 0.000277915),
("Alfred", 0.000318919),
("Allen", 0.001679613),
("Alvin", 0.00024794),
("Andre", 0.001400621),
("Andres", 0.000335574),
("Andrew", 0.013475074),
("Angel", 0.000902262),
("Anthony", 0.013783357),
("Antonio", 0.002392535),
("Arthur", 0.001342637),
("Austin", 0.003785615),
("Barry", 0.001102751),
("Benjamin", 0.006535474),
("Bernard", 0.000298691),
("Bill", 0.000430013),
("Billy", 0.001749806),
("Blake", 0.001218155),
("Bob", 0.000235731),
("Bobby", 0.001666977),
("Brad", 0.000984544),
("Bradley", 0.003845018),
("Brady", 0.000277522),
("Brandon", 0.009518346),
("Brendan", 0.000736758),
("Brent", 0.001889131),
("Brett", 0.002248371),
("Brian", 0.01597677),
("Bruce", 0.001883335),
("Bryan", 0.00456454),
("Bryce", 0.000457406),
("Caleb", 0.001485861),
("Calvin", 0.001168738),
("Cameron", 0.00180755),
("Carl", 0.002011802),
("Carlos", 0.00266638),
("Casey", 0.001440035),
("Cesar", 0.000304898),
("Chad", 0.003858817),
("Charles", 0.010889881),
("Chase", 0.000971942),
("Chris", 0.001389507),
("Christian", 0.003097779),
("Christopher", 0.02783596),
("Clarence", 0.000299289),
("Clayton", 0.000662222),
("Clifford", 0.00053078),
("Clinton", 0.000579307),
("Cody", 0.00353482),
("Cole", 0.000578811),
("Colin", 0.00078508),
("Collin", 0.000406057),
("Colton", 0.000520845),
("Connor", 0.000981073),
("Corey", 0.002476612),
("Cory", 0.001813005),
("Craig", 0.00338161),
("Cristian", 0.000333847),
("Curtis", 0.002140235),
("Dakota", 0.000797614),
("Dale", 0.001171354),
("Dalton", 0.000615113),
("Damon", 0.00034308),
("Dan", 0.000388496),
("Daniel", 0.018881874),
("Danny", 0.001873879),
("Darin", 0.000234962),
("Darius", 0.000336189),
("Darrell", 0.001218582),
("Darren", 0.001253738),
("Darryl", 0.00067019),
("Daryl", 0.000260918),
("Dave", 0.000269673),
("David", 0.031073833),
("Dean", 0.000965375),
("Dennis", 0.003318992),
("Derek", 0.003095299),
("Derrick", 0.001955921),
("Devin", 0.001312474),
("Devon", 0.000485877),
("Dillon", 0.000558361),
("Dominic", 0.000438221),
("Don", 0.000378322),
("Donald", 0.005689572),
("Douglas", 0.004513687),
("Drew", 0.000596868),
("Duane", 0.00061855),
("Dustin", 0.003088938),
("Dwayne", 0.000711382),
("Dylan", 0.002329096),
("Earl", 0.000348347),
("Eddie", 0.0007944),
("Edgar", 0.000379536),
("Eduardo", 0.000465358),
("Edward", 0.005702242),
("Edwin", 0.001117833),
("Elijah", 0.000592183),
("Eric", 0.012024659),
("Erik", 0.001997096),
("Ernest", 0.000746556),
("Ethan", 0.001143978),
("Eugene", 0.000784243),
("Evan", 0.001570691),
("Fernando", 0.000557608),
("Francis", 0.000330837),
("Francisco", 0.001084335),
("Frank", 0.003276449),
("Franklin", 0.000237561),
("Fred", 0.000396618),
("Frederick", 0.001104188),
("Gabriel", 0.001906504),
("Garrett", 0.001124861),
("Gary", 0.005023109),
("Gavin", 0.000295373),
("Gene", 0.00023426),
("Geoffrey", 0.000425978),
("George", 0.004423984),
("Gerald", 0.00165841),
("Gilbert", 0.000246726),
("Glen", 0.000374338),
("Glenn", 0.001111421),
("Gordon", 0.00027075),
("Grant", 0.00068322),
("Greg", 0.000623492),
("Gregg", 0.000235885),
("Gregory", 0.007676443),
("Guy", 0.000262645),
("Harold", 0.000929467),
("Harry", 0.000586934),
("Hayden", 0.000279454),
("Hector", 0.000798691),
("Henry", 0.001856232),
("Herbert", 0.000234226),
("Howard", 0.000712921),
("Hunter", 0.001034679),
("Ian", 0.001863192),
("Isaac", 0.001001951),
("Isaiah", 0.000625441),
("Ivan", 0.000350433),
("Jack", 0.001839748),
("Jackson", 0.000403253),
("Jacob", 0.007845384),
("Jaime", 0.000421378),
("Jake", 0.000565782),
("James", 0.029601617),
("Jamie", 0.00093552),
("Jared", 0.002538802),
("Jason", 0.01520513),
("Javier", 0.000625202),
("Jay", 0.001411462),
("Jeff", 0.001271436),
("Jeffery", 0.002627873),
("Jeffrey", 0.01225709),
("Jeremiah", 0.001209605),
("Jeremy", 0.006336079),
("Jermaine", 0.000450156),
("Jerome", 0.000634299),
("Jerry", 0.003150273),
("Jesse", 0.003884552),
("Jesus", 0.001628965),
("Jim", 0.000567714),
("Jimmy", 0.001607489),
("Joe", 0.001621544),
("Joel", 0.002537742),
("John", 0.028683008),
("Johnathan", 0.000840448),
("Johnny", 0.002117065),
("Jon", 0.001561184),
("Jonathan", 0.009963971),
("Jonathon", 0.000701157),
("Jordan", 0.003451546),
("Jorge", 0.001180553),
("Jose", 0.005368207),
("Joseph", 0.018604763),
("Joshua", 0.014808101),
("Juan", 0.003233598),
("Julian", 0.000693736),
("Justin", 0.010197889),
("Karl", 0.000362437),
("Keith", 0.004622866),
("Kelly", 0.000775283),
("Kenneth", 0.008318145),
("Kent", 0.000329418),
("Kerry", 0.000261448),
("Kevin", 0.014324157),
("Kirk", 0.0003801),
("Kristopher", 0.000580692),
("Kurt", 0.000716375),
("Kyle", 0.006350049),
("Lance", 0.001048495),
("Larry", 0.003658807),
("Lawrence", 0.001670294),
("Lee", 0.001223883),
("Leon", 0.000236347),
("Leonard", 0.000756713),
("Leroy", 0.000260234),
("Leslie", 0.000234637),
("Levi", 0.000347184),
("Logan", 0.001325812),
("Lonnie", 0.000258576),
("Louis", 0.001212255),
("Lucas", 0.001098237),
("Luis", 0.002427777),
("Luke", 0.001221455),
("Malik", 0.000306813),
("Manuel", 0.001331369),
("Marc", 0.001431947),
("Marco", 0.000290586),
("Marcus", 0.002604122),
("Mario", 0.001229337),
("Mark", 0.014382277),
("Martin", 0.002085226),
("Marvin", 0.000732962),
("Mason", 0.000562037),
("Mathew", 0.000605555),
("Matthew", 0.020425018),
("Maurice", 0.000777078),
("Max", 0.000311276),
("Maxwell", 0.000357478),
("Melvin", 0.00061932),
("Michael", 0.045602241),
("Micheal", 0.001273847),
("Miguel", 0.001416267),
("Mike", 0.001221797),
("Mitchell", 0.001747788),
("Nathan", 0.005039405),
("Nathaniel", 0.001887558),
("Neil", 0.000240331),
("Nicholas", 0.010021219),
("Nicolas", 0.000362522),
("Noah", 0.000960947),
("Norman", 0.000389043),
("Omar", 0.000639052),
("Oscar", 0.000946583),
("Parker", 0.000277522),
("Patrick", 0.007153255),
("Paul", 0.009272953),
("Pedro", 0.000275726),
("Perry", 0.000258644),
("Peter", 0.004340385),
("Philip", 0.002262956),
("Phillip", 0.00280273),
("Preston", 0.000292022),
("Ralph", 0.000836891),
("Randall", 0.001614722),
("Randy", 0.003021926),
("Ray", 0.000379451),
("Raymond", 0.003493952),
("Reginald", 0.00095108),
("Ricardo", 0.001197276),
("Richard", 0.014131961),
("Rick", 0.000440016),
("Rickey", 0.00023833),
("Ricky", 0.001856882),
("Riley", 0.000322031),
("Robert", 0.026938092),
("Roberto", 0.000906024),
("Rodney", 0.002180555),
("Roger", 0.002038032),
("Ronald", 0.00576775),
("Ronnie", 0.000905938),
("Ross", 0.00026863),
("Roy", 0.001311346),
("Ruben", 0.000774821),
("Russell", 0.002096221),
("Ryan", 0.01128178),
("Samuel", 0.00498019),
("Scott", 0.010580999),
("Sean", 0.005593456),
("Sergio", 0.000568518),
("Seth", 0.001537416),
("Shane", 0.002530218),
("Shannon", 0.000421583),
("Shaun", 0.000748761),
("Shawn", 0.004474546),
("Spencer", 0.000912094),
("Stanley", 0.000739032),
("Stephen", 0.007675365),
("Steve", 0.001407564),
("Steven", 0.013292898),
("Stuart", 0.000238826),
("Tanner", 0.000639292),
("Taylor", 0.00133036),
("Terrance", 0.000203311),
("Terrence", 0.000203704),
("Terry", 0.002873624),
("Theodore", 0.000596561),
("Thomas", 0.0143364),
("Tim", 0.000711126),
("Timothy", 0.012632608),
("Todd", 0.00414612),
("Tom", 0.000499283),
("Tommy", 0.000778737),
("Tony", 0.002511563),
("Tracy", 0.000728259),
("Travis", 0.004022458),
("Trevor", 0.001692523),
("Tristan", 0.000408759),
("Troy", 0.002695415),
("Tyler", 0.005962323),
("Tyrone", 0.000587207),
("Vernon", 0.000246401),
("Victor", 0.002340621),
("Vincent", 0.002494515),
("Walter", 0.001525891),
("Warren", 0.000317414),
("Wayne", 0.00160966),
("Wesley", 0.001733835),
("William", 0.020025989),
("Willie", 0.001379247),
("Wyatt", 0.000306591),
("Xavier", 0.000415222),
("Zachary", 0.005918634),
)
)
first_names = first_names_male.copy()
first_names.update(first_names_female)
first_names_nonbinary = first_names_male.copy()
first_names_nonbinary.update(first_names_female)
# Top 1000 US surnames from US Census data
# Weighted by number of occurrences
# By way of http://names.mongabay.com/data/1000.html on 2/10/2016
last_names = OrderedDict(
(
("Smith", 0.021712045),
("Johnson", 0.01696938),
("Williams", 0.014016962),
("Brown", 0.012610763),
("Jones", 0.012451866),
("Miller", 0.010305045),
("Davis", 0.009798219),
("Garcia", 0.007842422),
("Rodriguez", 0.007348561),
("Wilson", 0.007154951),
("Martinez", 0.007082045),
("Anderson", 0.006966203),
("Taylor", 0.006582218),
("Thomas", 0.006493824),
("Hernandez", 0.006454314),
("Moore", 0.006383948),
("Martin", 0.006146745),
("Jackson", 0.006086567),
("Thompson", 0.005887767),
("White", 0.005843424),
("Lopez", 0.005679145),
("Lee", 0.005535909),
("Gonzalez", 0.005461513),
("Harris", 0.005423356),
("Clark", 0.005010598),
("Lewis", 0.00465937),
("Robinson", 0.004596305),
("Walker", 0.004580579),
("Perez", 0.00446375),
("Hall", 0.004327121),
("Young", 0.004257495),
("Allen", 0.00423392),
("Sanchez", 0.004031749),
("Wright", 0.004023754),
("King", 0.004011135),
("Scott", 0.003838487),
("Green", 0.003778053),
("Baker", 0.003776901),
("Adams", 0.00377448),
("Nelson", 0.003766713),
("Hill", 0.003762455),
("Ramirez", 0.003554281),
("Campbell", 0.003398636),
("Mitchell", 0.003357336),
("Roberts", 0.003346207),
("Carter", 0.0033127),
("Phillips", 0.003214932),
("Evans", 0.003127113),
("Turner", 0.003067045),
("Torres", 0.002971158),
("Parker", 0.002962725),
("Collins", 0.002904264),
("Edwards", 0.002897155),
("Stewart", 0.002859044),
("Flores", 0.002856449),
("Morris", 0.002848582),
("Nguyen", 0.002833697),
("Murphy", 0.00274576),
("Rivera", 0.002736275),
("Cook", 0.002693623),
("Rogers", 0.002690041),
("Morgan", 0.002525543),
("Peterson", 0.002513125),
("Cooper", 0.00246795),
("Reed", 0.0024437),
("Bailey", 0.002429747),
("Bell", 0.002419112),
("Gomez", 0.002408494),
("Kelly", 0.002379209),
("Howard", 0.002327986),
("Ward", 0.002321973),
("Cox", 0.002318775),
("Diaz", 0.00230051),
("Richardson", 0.002280051),
("Wood", 0.002259639),
("Watson", 0.002215168),
("Brooks", 0.002199808),
("Bennett", 0.002184311),
("Gray", 0.002162912),
("James", 0.002131032),
("Reyes", 0.002124517),
("Cruz", 0.002111304),
("Hughes", 0.002095999),
("Price", 0.002090206),
("Myers", 0.002054278),
("Long", 0.002042126),
("Foster", 0.002019703),
("Sanders", 0.002018442),
("Ross", 0.002009844),
("Morales", 0.001988655),
("Powell", 0.001978704),
("Sullivan", 0.001970362),
("Russell", 0.001968461),
("Ortiz", 0.001961617),
("Jenkins", 0.001952974),
("Gutierrez", 0.001945371),
("Perry", 0.001942986),
("Butler", 0.001926859),
("Barnes", 0.00192272),
("Fisher", 0.001921377),
("Henderson", 0.001919686),
("Coleman", 0.001906255),
("Simmons", 0.001842531),
("Patterson", 0.00181427),
("Jordan", 0.00180198),
("Reynolds", 0.001787233),
("Hamilton", 0.001775656),
("Graham", 0.001773307),
("Kim", 0.001773243),
("Gonzales", 0.001772028),
("Alexander", 0.001767542),
("Ramos", 0.001764371),
("Wallace", 0.001743026),
("Griffin", 0.001741893),
("West", 0.001722047),
("Cole", 0.001715916),
("Hayes", 0.001712992),
("Chavez", 0.001698299),
("Gibson", 0.001685096),
("Bryant", 0.001679075),
("Ellis", 0.001662381),
("Stevens", 0.001657657),
("Murray", 0.001630218),
("Ford", 0.001630062),
("Marshall", 0.001619244),
("Owens", 0.001611212),
("Mcdonald", 0.001609019),
("Harrison", 0.001604295),
("Ruiz", 0.001602943),
("Kennedy", 0.001568285),
("Wells", 0.001559139),
("Alvarez", 0.001542527),
("Woods", 0.0015425),
("Mendoza", 0.001540243),
("Castillo", 0.001511972),
("Olson", 0.001493963),
("Webb", 0.001493771),
("Washington", 0.001489705),
("Tucker", 0.001488763),
("Freeman", 0.001486507),
("Burns", 0.001481636),
("Henry", 0.001474683),
("Vasquez", 0.001461863),
("Snyder", 0.001456143),
("Simpson", 0.001445891),
("Crawford", 0.001444795),
("Jimenez", 0.001438892),
("Porter", 0.001433163),
("Mason", 0.0014207),
("Shaw", 0.001417849),
("Gordon", 0.001415674),
("Wagner", 0.001411855),
("Hunter", 0.001410886),
("Romero", 0.001405057),
("Hicks", 0.00140365),
("Dixon", 0.001389003),
("Hunt", 0.001388738),
("Palmer", 0.00137431),
("Robertson", 0.001373323),
("Black", 0.001372291),
("Holmes", 0.001372108),
("Stone", 0.001368782),
("Meyer", 0.001367521),
("Boyd", 0.001365803),
("Mills", 0.001351485),
("Warren", 0.001351458),
("Fox", 0.001346441),
("Rose", 0.001342485),
("Rice", 0.001338062),
("Moreno", 0.001334846),
("Schmidt", 0.001330067),
("Patel", 0.001325508),
("Ferguson", 0.001299832),
("Nichols", 0.001296908),
("Herrera", 0.0012864),
("Medina", 0.001273307),
("Ryan", 0.001273142),
("Fernandez", 0.001272841),
("Weaver", 0.001268354),
("Daniels", 0.001268034),
("Stephens", 0.001267724),
("Gardner", 0.001266974),
("Payne", 0.0012612),
("Kelley", 0.001256878),
("Dunn", 0.001251395),
("Pierce", 0.001247393),
("Arnold", 0.001245547),
("Tran", 0.001243537),
("Spencer", 0.001228443),
("Peters", 0.001226505),
("Hawkins", 0.001224998),
("Grant", 0.001224705),
("Hansen", 0.001219589),
("Castro", 0.001217578),
("Hoffman", 0.001212014),
("Hart", 0.001210378),
("Elliott", 0.001210296),
("Cunningham", 0.00120517),
("Knight", 0.001204841),
("Bradley", 0.001199624),
("Carroll", 0.001197166),
("Hudson", 0.001195091),
("Duncan", 0.001191674),
("Armstrong", 0.001187681),
("Berry", 0.001182409),
("Andrews", 0.001181632),
("Johnston", 0.001178114),
("Ray", 0.001176826),
("Lane", 0.001176214),
("Riley", 0.001169206),
("Carpenter", 0.001161101),
("Perkins", 0.001159986),
("Aguilar", 0.001154942),
("Silva", 0.001152795),
("Richards", 0.001148126),
("Willis", 0.001147888),
("Matthews", 0.001140688),
("Chapman", 0.001138632),
("Lawrence", 0.001135955),
("Garza", 0.00113421),
("Vargas", 0.001132583),
("Watkins", 0.001118832),
("Wheeler", 0.00111186),
("Larson", 0.001106195),
("Carlson", 0.001097606),
("Harper", 0.001095267),
("George", 0.001094444),
("Greene", 0.001092855),
("Burke", 0.001088935),
("Guzman", 0.001081762),
("Morrison", 0.001077641),
("Munoz", 0.001076133),
("Jacobs", 0.001055721),
("Obrien", 0.001054304),
("Lawson", 0.001052486),
("Franklin", 0.001049498),
("Lynch", 0.001045743),
("Bishop", 0.00104196),
("Carr", 0.001040662),
("Salazar", 0.001036788),
("Austin", 0.001033974),
("Mendez", 0.0010301),
("Gilbert", 0.001027084),
("Jensen", 0.001026408),
("Williamson", 0.001025348),
("Montgomery", 0.00102469),
("Harvey", 0.001024617),
("Oliver", 0.001020094),
("Howell", 0.001001756),
("Dean", 0.000998064),
("Hanson", 0.000996685),
("Weber", 0.000985601),
("Garrett", 0.000984788),
("Sims", 0.000979918),
("Burton", 0.000979132),
("Fuller", 0.000974783),
("Soto", 0.000974317),
("Mccoy", 0.000972946),
("Welch", 0.00096676),
("Chen", 0.000964384),
("Schultz", 0.000959067),
("Walters", 0.000952844),
("Reid", 0.00095034),
("Fields", 0.00094335),
("Walsh", 0.000943113),
("Little", 0.000938563),
("Fowler", 0.000937667),
("Bowman", 0.000934186),
("Davidson", 0.000932404),
("May", 0.000929498),
("Day", 0.000929041),
("Schneider", 0.00091878),
("Newman", 0.000918214),
("Brewer", 0.000917976),
("Lucas", 0.000917538),
("Holland", 0.000912677),
("Wong", 0.000908172),
("Banks", 0.000907276),
("Santos", 0.000904526),
("Curtis", 0.000904206),
("Pearson", 0.000902105),
("Delgado", 0.000901621),
("Valdez", 0.000901027),
("Pena", 0.000898605),
("Rios", 0.000882377),
("Douglas", 0.000881062),
("Sandoval", 0.000879947),
("Barrett", 0.000876228),
("Hopkins", 0.000864414),
("Keller", 0.000861645),
("Guerrero", 0.000860293),
("Stanley", 0.000857232),
("Bates", 0.000856555),
("Alvarado", 0.000856373),
("Beck", 0.000851238),
("Ortega", 0.000850963),
("Wade", 0.00084825),
("Estrada", 0.000848222),
("Contreras", 0.00084666),
("Barnett", 0.000843252),
("Caldwell", 0.00083458),
("Santiago", 0.00083119),
("Lambert", 0.000828001),
("Powers", 0.000826019),
("Chambers", 0.000825324),
("Nunez", 0.000824255),
("Craig", 0.000818618),
("Leonard", 0.000815027),
("Lowe", 0.000814844),
("Rhodes", 0.000812459),
("Byrd", 0.00081149),
("Gregory", 0.000811481),
("Shelton", 0.000807059),
("Frazier", 0.00080705),
("Becker", 0.000805122),
("Maldonado", 0.000804226),
("Fleming", 0.000803614),
("Vega", 0.000801595),
("Sutton", 0.000798351),
("Cohen", 0.000797008),
("Jennings", 0.00079529),
("Parks", 0.000788967),
("Mcdaniel", 0.000788702),
("Watts", 0.000787889),
("Barker", 0.000778688),
("Norris", 0.000778605),
("Vaughn", 0.000777006),
("Vazquez", 0.000775992),
("Holt", 0.000774018),
("Schwartz", 0.000773918),
("Steele", 0.000770756),
("Benson", 0.00076966),
("Neal", 0.000766151),
("Dominguez", 0.000765073),
("Horton", 0.000763173),
("Terry", 0.000762387),
("Wolfe", 0.000759417),
("Hale", 0.000757983),
("Lyons", 0.000751614),
("Graves", 0.000750892),
("Haynes", 0.000749595),
("Miles", 0.000748644),
("Park", 0.000748251),
("Warner", 0.000747648),
("Padilla", 0.000747475),
("Bush", 0.000744907),
("Thornton", 0.000741864),
("Mccarthy", 0.000740439),
("Mann", 0.00074032),
("Zimmerman", 0.000739608),
("Erickson", 0.000739534),
("Fletcher", 0.000739498),
("Mckinney", 0.00073661),
("Page", 0.000735487),
("Dawson", 0.000732718),
("Joseph", 0.000731256),
("Marquez", 0.000730534),
("Reeves", 0.00072931),
("Klein", 0.000728104),
("Espinoza", 0.000724787),
("Baldwin", 0.000723224),
("Moran", 0.000717696),
("Love", 0.000715659),
("Robbins", 0.000713996),
("Higgins", 0.000713685),
("Ball", 0.000708696),
("Cortez", 0.000708066),
("Le", 0.000707709),
("Griffith", 0.00070749),
("Bowen", 0.000704283),
("Sharp", 0.000702364),
("Cummings", 0.000700893),
("Ramsey", 0.000700144),
("Hardy", 0.000699988),
("Swanson", 0.000699358),
("Barber", 0.000699038),
("Acosta", 0.000698791),
("Luna", 0.000695593),
("Chandler", 0.000695474),
("Daniel", 0.000686529),
("Blair", 0.000686529),
("Cross", 0.00068652),
("Simon", 0.000683824),
("Dennis", 0.000683322),
("Oconnor", 0.000683066),
("Quinn", 0.00068101),
("Gross", 0.000678762),
("Navarro", 0.000675884),
("Moss", 0.000673874),
("Fitzgerald", 0.000671791),
("Doyle", 0.000671754),
("Mclaughlin", 0.000668191),
("Rojas", 0.00066767),
("Rodgers", 0.000667213),
("Stevenson", 0.000666034),
("Singh", 0.00066375),
("Yang", 0.000663613),
("Figueroa", 0.000662754),
("Harmon", 0.000661667),
("Newton", 0.000660881),
("Paul", 0.00066015),
("Manning", 0.000658514),
("Garner", 0.000658359),
("Mcgee", 0.000657198),
("Reese", 0.000655636),
("Francis", 0.000655353),
("Burgess", 0.000654265),
("Adkins", 0.000653571),
("Goodman", 0.000653151),
("Curry", 0.00065189),
("Brady", 0.000650345),
("Christensen", 0.000650062),
("Potter", 0.000649688),
("Walton", 0.000648719),
("Goodwin", 0.000642652),
("Mullins", 0.000642222),
("Molina", 0.000641537),
("Webster", 0.000640733),
("Fischer", 0.000640477),
("Campos", 0.000639152),
("Avila", 0.000638175),
("Sherman", 0.000638147),
("Todd", 0.000637873),
("Chang", 0.00063738),
("Blake", 0.000633021),
("Malone", 0.00063282),
("Wolf", 0.000629604),
("Hodges", 0.000629266),
("Juarez", 0.000628507),
("Gill", 0.000627722),
("Farmer", 0.000624158),
("Hines", 0.00062266),
("Gallagher", 0.00062202),
("Duran", 0.000621755),
("Hubbard", 0.000621527),
("Cannon", 0.000620631),
("Miranda", 0.0006181),
("Wang", 0.000617406),
("Saunders", 0.000614116),
("Tate", 0.000614098),
("Mack", 0.000613604),
("Hammond", 0.000612773),
("Carrillo", 0.000612691),
("Townsend", 0.000610854),
("Wise", 0.000609803),
("Ingram", 0.000609136),
("Barton", 0.000608743),
("Mejia", 0.000607939),
("Ayala", 0.000607766),
("Schroeder", 0.000606825),
("Hampton", 0.000606514),
("Rowe", 0.000604933),
("Parsons", 0.000604915),
("Frank", 0.000602311),
("Waters", 0.000601388),
("Strickland", 0.000601361),
("Osborne", 0.000601251),
("Maxwell", 0.000601041),
("Chan", 0.000600493),
("Deleon", 0.000599387),
("Norman", 0.000596381),
("Harrington", 0.00059512),
("Casey", 0.000592232),
("Patton", 0.00059184),
("Logan", 0.000590049),
("Bowers", 0.000589318),
("Mueller", 0.000587572),
("Glover", 0.00058643),
("Floyd", 0.000586074),
("Hartman", 0.000583205),
("Buchanan", 0.000583187),
("Cobb", 0.000582401),
("French", 0.00057701),
("Kramer", 0.000575858),
("Mccormick", 0.000572569),
("Clarke", 0.0005715),
("Tyler", 0.00057139),
("Gibbs", 0.000571208),
("Moody", 0.000569654),
("Conner", 0.000569572),
("Sparks", 0.000568649),
("Mcguire", 0.000567571),
("Leon", 0.000566822),
("Bauer", 0.000566319),
("Norton", 0.000564729),
("Pope", 0.000564227),
("Flynn", 0.000564199),
("Hogan", 0.000563322),
("Robles", 0.00056303),
("Salinas", 0.000562692),
("Yates", 0.000561029),
("Lindsey", 0.000559192),
("Lloyd", 0.000558781),
("Marsh", 0.000557365),
("Mcbride", 0.000556222),
("Owen", 0.000552449),
("Solis", 0.000548648),
("Pham", 0.00054777),
("Lang", 0.000546802),
("Pratt", 0.000546418),
("Lara", 0.000545779),
("Brock", 0.000545331),
("Ballard", 0.00054513),
("Trujillo", 0.000544664),
("Shaffer", 0.000541173),
("Drake", 0.000539602),
("Roman", 0.000539282),
("Aguirre", 0.00053835),
("Morton", 0.000537162),
("Stokes", 0.000536239),
("Lamb", 0.000535033),
("Pacheco", 0.000534841),
("Patrick", 0.00053231),
("Cochran", 0.000532091),
("Shepherd", 0.000529368),
("Cain", 0.000528801),
("Burnett", 0.000528674),
("Hess", 0.000528335),
("Li", 0.000528007),
("Cervantes", 0.000527084),
("Olsen", 0.000524087),
("Briggs", 0.000523538),
("Ochoa", 0.000522743),
("Cabrera", 0.000522387),
("Velasquez", 0.000522314),
("Montoya", 0.00052151),
("Roth", 0.000521099),
("Meyers", 0.000518485),
("Cardenas", 0.000517334),
("Fuentes", 0.000515717),
("Weiss", 0.000513085),
("Wilkins", 0.000512309),
("Hoover", 0.000512309),
("Nicholson", 0.000511559),
("Underwood", 0.000511441),
("Short", 0.000510801),
("Carson", 0.000510052),
("Morrow", 0.000508617),
("Colon", 0.000507228),
("Holloway", 0.000506808),
("Summers", 0.000506123),
("Bryan", 0.000505008),
("Petersen", 0.00050424),
("Mckenzie", 0.000503318),
("Serrano", 0.000503071),
("Wilcox", 0.000502431),
("Carey", 0.000501856),
("Clayton", 0.000501408),
("Poole", 0.000499864),
("Calderon", 0.000499727),
("Gallegos", 0.000499553),
("Greer", 0.000498996),
("Rivas", 0.000498786),
("Guerra", 0.000498667),
("Decker", 0.000497525),
("Collier", 0.000497196),
("Wall", 0.000497077),
("Whitaker", 0.000496547),
("Bass", 0.000496117),
("Flowers", 0.000495944),
("Davenport", 0.000495295),
("Conley", 0.000495185),
("Houston", 0.00049365),
("Huff", 0.000492426),
("Copeland", 0.00049132),
("Hood", 0.00049101),
("Monroe", 0.000488616),
("Massey", 0.00048847),
("Roberson", 0.000486085),
("Combs", 0.00048592),
("Franco", 0.000485747),
("Larsen", 0.000483937),
("Pittman", 0.000481434),
("Randall", 0.000479661),
("Skinner", 0.000479616),
("Wilkinson", 0.000479552),
("Kirby", 0.00047946),
("Cameron", 0.00047915),
("Bridges", 0.000477514),
("Anthony", 0.000476472),
("Richard", 0.000476399),
("Kirk", 0.00047565),
("Bruce", 0.000475175),
("Singleton", 0.000473283),
("Mathis", 0.000473274),
("Bradford", 0.000472635),
("Boone", 0.000472205),
("Abbott", 0.000471666),
("Charles", 0.000470734),
("Allison", 0.000470606),
("Sweeney", 0.00047057),
("Atkinson", 0.000470469),
("Horn", 0.000469473),
("Jefferson", 0.0004693),
("Rosales", 0.000469071),
("York", 0.000469053),
("Christian", 0.000467618),
("Phelps", 0.000467408),
("Farrell", 0.000466869),
("Castaneda", 0.000466814),
("Nash", 0.000466193),
("Dickerson", 0.000466156),
("Bond", 0.000465818),
("Wyatt", 0.00046485),
("Foley", 0.000464649),
("Chase", 0.000463963),
("Gates", 0.000463698),
("Vincent", 0.000462602),
("Mathews", 0.000462419),
("Hodge", 0.000462136),
("Garrison", 0.000461268),
("Trevino", 0.000461012),
("Villarreal", 0.000460071),
("Heath", 0.000459669),
("Dalton", 0.00045838),
("Valencia", 0.000457101),
("Callahan", 0.000456178),
("Hensley", 0.000455566),
("Atkins", 0.000454616),
("Huffman", 0.000454461),
("Roy", 0.000454351),
("Boyer", 0.000453218),
("Shields", 0.000452807),
("Lin", 0.000451016),
("Hancock", 0.000450742),
("Grimes", 0.000449965),
("Glenn", 0.000449929),
("Cline", 0.000449252),
("Delacruz", 0.00044917),
("Camacho", 0.000447726),
("Dillon", 0.0004462),
("Parrish", 0.000446109),
("Oneill", 0.000444583),
("Melton", 0.000444017),
("Booth", 0.000443889),
("Kane", 0.000443404),
("Berg", 0.000442975),
("Harrell", 0.000442893),
("Pitts", 0.000442811),
("Savage", 0.000441943),
("Wiggins", 0.000441833),
("Brennan", 0.000441294),
("Salas", 0.000441166),
("Marks", 0.000441157),
("Russo", 0.00043974),
("Sawyer", 0.000438397),
("Baxter", 0.000437283),
("Golden", 0.000437118),
("Hutchinson", 0.000436844),
("Liu", 0.000435528),
("Walter", 0.000435071),
("Mcdowell", 0.000434258),
("Wiley", 0.000434048),
("Rich", 0.00043381),
("Humphrey", 0.000433746),
("Johns", 0.000432093),
("Koch", 0.000432065),
("Suarez", 0.000431599),
("Hobbs", 0.000431462),
("Beard", 0.000430621),
("Gilmore", 0.000429909),
("Ibarra", 0.000428492),
("Keith", 0.00042714),
("Macias", 0.000427067),
("Khan", 0.000426829),
("Andrade", 0.000426729),
("Ware", 0.000426546),
("Stephenson", 0.000426363),
("Henson", 0.000425879),
("Wilkerson", 0.000425843),
("Dyer", 0.000425559),
("Mcclure", 0.000424929),
("Blackwell", 0.000424838),
("Mercado", 0.000424308),
("Tanner", 0.000424079),
("Eaton", 0.000423997),
("Clay", 0.000422727),
("Barron", 0.000422106),
("Beasley", 0.00042195),
("Oneal", 0.000421786),
("Small", 0.000418944),
("Preston", 0.000418944),
("Wu", 0.000418624),
("Zamora", 0.000418542),
("Macdonald", 0.000418323),
("Vance", 0.000418149),
("Snow", 0.000417473),
("Mcclain", 0.000416294),
("Stafford", 0.000414366),
("Orozco", 0.000413818),
("Barry", 0.000411579),
("English", 0.00041147),
("Shannon", 0.000410282),
("Kline", 0.000410264),
("Jacobson", 0.000410026),
("Woodard", 0.000409624),
("Huang", 0.000408573),
("Kemp", 0.000408445),
("Mosley", 0.000408418),
("Prince", 0.000407888),
("Merritt", 0.00040776),
("Hurst", 0.000407404),
("Villanueva", 0.000407248),
("Roach", 0.000406188),
("Nolan", 0.000405887),
("Lam", 0.000405558),
("Yoder", 0.000404279),
("Mccullough", 0.000403164),
("Lester", 0.0004013),
("Santana", 0.000400898),
("Valenzuela", 0.000399938),
("Winters", 0.000399865),
("Barrera", 0.000399482),
("Orr", 0.000398988),
("Leach", 0.000398988),
("Berger", 0.000397983),
("Mckee", 0.000397974),
("Strong", 0.000396832),
("Conway", 0.000396512),
("Stein", 0.000395927),
("Whitehead", 0.000395735),
("Bullock", 0.000393095),
("Escobar", 0.000392492),
("Knox", 0.000392327),
("Meadows", 0.000391843),
("Solomon", 0.000391432),
("Velez", 0.000391258),
("Odonnell", 0.000391094),
("Kerr", 0.000390692),
("Stout", 0.000389878),
("Blankenship", 0.000389824),
("Browning", 0.000389632),
("Kent", 0.00038922),
("Lozano", 0.000388946),
("Bartlett", 0.000388444),
("Pruitt", 0.000387996),
("Buck", 0.000387795),
("Barr", 0.000387713),
("Gaines", 0.000387137),
("Durham", 0.000387101),
("Gentry", 0.000387028),
("Mcintyre", 0.000386826),
("Sloan", 0.000386333),
("Rocha", 0.000385036),
("Melendez", 0.000385036),
("Herman", 0.000384597),
("Sexton", 0.000384496),
("Moon", 0.000384332),
("Hendricks", 0.00038266),
("Rangel", 0.000382559),
("Stark", 0.000382514),
("Lowery", 0.00038075),
("Hardin", 0.000380695),
("Hull", 0.000380622),
("Sellers", 0.000379754),
("Ellison", 0.000378822),
("Calhoun", 0.000378758),
("Gillespie", 0.000378219),
("Mora", 0.000377808),
("Knapp", 0.000377068),
("Mccall", 0.000376739),
("Morse", 0.000375652),
("Dorsey", 0.000375579),
("Weeks", 0.000375113),
("Nielsen", 0.000374692),
("Livingston", 0.000374299),
("Leblanc", 0.000373925),
("Mclean", 0.00037345),
("Bradshaw", 0.000372746),
("Glass", 0.000372106),
("Middleton", 0.00037196),
("Buckley", 0.000371942),
("Schaefer", 0.000371549),
("Frost", 0.000370809),
("Howe", 0.000370562),
("House", 0.000369849),
("Mcintosh", 0.00036963),
("Ho", 0.000369265),
("Pennington", 0.000368588),
("Reilly", 0.000368324),
("Hebert", 0.000368077),
("Mcfarland", 0.00036772),
("Hickman", 0.000367538),
("Noble", 0.000367474),
("Spears", 0.000367346),
("Conrad", 0.000366423),
("Arias", 0.000366277),
("Galvan", 0.000365911),
("Velazquez", 0.000365765),
("Huynh", 0.000365591),
("Frederick", 0.000364659),
("Randolph", 0.000363134),
("Cantu", 0.000361845),
("Fitzpatrick", 0.000360931),
("Mahoney", 0.000360374),
("Peck", 0.000360301),
("Villa", 0.000360027),
("Michael", 0.000359725),
("Donovan", 0.000358821),
("Mcconnell", 0.000358209),
("Walls", 0.00035787),
("Boyle", 0.000357642),
("Mayer", 0.000357368),
("Zuniga", 0.000356875),
("Giles", 0.000356372),
("Pineda", 0.000356345),
("Pace", 0.000356125),
("Hurley", 0.000356089),
("Mays", 0.000355568),
("Mcmillan", 0.000355403),
("Crosby", 0.000354928),
("Ayers", 0.000354855),
("Case", 0.000354152),
("Bentley", 0.00035374),
("Shepard", 0.000353658),
("Everett", 0.000353631),
("Pugh", 0.00035353),
("David", 0.000353238),
("Mcmahon", 0.000352306),
("Dunlap", 0.000351931),
("Bender", 0.000351456),
("Hahn", 0.000350451),
("Harding", 0.000350323),
("Acevedo", 0.000349336),
("Raymond", 0.00034866),
("Blackburn", 0.000348468),
("Duffy", 0.000346869),
("Landry", 0.00034686),
("Dougherty", 0.00034633),
("Bautista", 0.000345818),
("Shah", 0.00034569),
("Potts", 0.000344356),
("Arroyo", 0.000344274),
("Valentine", 0.000344192),
("Meza", 0.000344128),
("Gould", 0.00034411),
("Vaughan", 0.000343479),
("Fry", 0.000343032),
("Rush", 0.000342374),
("Avery", 0.0003421),
("Herring", 0.000341305),
("Dodson", 0.000340802),
("Clements", 0.000340245),
("Sampson", 0.000340217),
("Tapia", 0.000339916),
("Bean", 0.000339404),
("Lynn", 0.000339221),
("Crane", 0.000339203),
("Farley", 0.000339139),
("Cisneros", 0.000338536),
("Benton", 0.000338372),
("Ashley", 0.000338271),
("Mckay", 0.000337604),
("Finley", 0.000336928),
("Best", 0.000336818),
("Blevins", 0.000336626),
("Friedman", 0.000336553),
("Moses", 0.00033638),
("Sosa", 0.00033637),
("Blanchard", 0.000335923),
("Huber", 0.000335603),
("Frye", 0.000335484),
("Krueger", 0.000335283),
("Bernard", 0.000333931),
("Rosario", 0.000333867),
("Rubio", 0.000333794),
("Mullen", 0.000332981),
("Benjamin", 0.000332953),
("Haley", 0.000332898),
("Chung", 0.000332798),
("Moyer", 0.000332789),
("Choi", 0.000332505),
("Horne", 0.000331573),
("Yu", 0.000331546),
("Woodward", 0.000331153),
("Ali", 0.000329664),
("Nixon", 0.00032928),
("Hayden", 0.000329161),
("Rivers", 0.000328759),
("Estes", 0.000327471),
("Mccarty", 0.000326365),
("Richmond", 0.000326338),
("Stuart", 0.00032621),
("Maynard", 0.000325726),
("Brandt", 0.000325433),
("Oconnell", 0.000325378),
("Hanna", 0.000325278),
("Sanford", 0.000324967),
("Sheppard", 0.000324867),
("Church", 0.00032473),
("Burch", 0.000324565),
("Levy", 0.000324044),
("Rasmussen", 0.000323944),
("Coffey", 0.000323843),
("Ponce", 0.000323459),
("Faulkner", 0.000323359),
("Donaldson", 0.000323341),
("Schmitt", 0.000322783),
("Novak", 0.000322381),
("Costa", 0.000321879),
("Montes", 0.000321595),
("Booker", 0.000320727),
("Cordova", 0.000320481),
("Waller", 0.000319814),
("Arellano", 0.000319795),
("Maddox", 0.00031953),
("Mata", 0.000318781),
("Bonilla", 0.000318196),
("Stanton", 0.000318087),
("Compton", 0.000317867),
("Kaufman", 0.000317849),
("Dudley", 0.000317703),
("Mcpherson", 0.000317639),
("Beltran", 0.000317392),
("Dickson", 0.000317045),
("Mccann", 0.00031699),
("Villegas", 0.000316917),
("Proctor", 0.000316899),
("Hester", 0.000316835),
("Cantrell", 0.000316826),
("Daugherty", 0.000316607),
("Cherry", 0.000316287),
("Bray", 0.000315921),
("Davila", 0.000315611),
("Rowland", 0.000315218),
("Madden", 0.00031498),
("Levine", 0.00031498),
("Spence", 0.000314642),
("Good", 0.000314596),
("Irwin", 0.000314085),
("Werner", 0.000313884),
("Krause", 0.00031382),
("Petty", 0.000313207),
("Whitney", 0.000312961),
("Baird", 0.000312796),
("Hooper", 0.000311435),
("Pollard", 0.000311389),
("Zavala", 0.000311289),
("Jarvis", 0.000311124),
("Holden", 0.000311042),
("Hendrix", 0.00031096),
("Haas", 0.00031096),
("Mcgrath", 0.000310951),
("Bird", 0.00031032),
("Lucero", 0.000309955),
("Terrell", 0.000309882),
("Riggs", 0.000309461),
("Joyce", 0.000309233),
("Rollins", 0.000308812),
("Mercer", 0.000308812),
("Galloway", 0.000308593),
("Duke", 0.000308337),
("Odom", 0.000308081),
("Andersen", 0.000306172),
("Downs", 0.000306044),
("Hatfield", 0.00030577),
("Benitez", 0.00030556),
("Archer", 0.000305285),
("Huerta", 0.00030471),
("Travis", 0.000304628),
("Mcneil", 0.000303714),
("Hinton", 0.00030344),
("Zhang", 0.000303376),
("Hays", 0.000303303),
("Mayo", 0.000302681),
("Fritz", 0.000302151),
("Branch", 0.000301896),
("Mooney", 0.000301101),
("Ewing", 0.000300845),
("Ritter", 0.000300287),
("Esparza", 0.000299447),
("Frey", 0.000299109),
("Braun", 0.00029857),
("Gay", 0.000298533),
("Riddle", 0.000298369),
("Haney", 0.000298277),
("Kaiser", 0.000297574),
("Holder", 0.000296651),
("Chaney", 0.000296349),
("Mcknight", 0.00029592),
("Gamble", 0.000295838),
("Vang", 0.000295435),
("Cooley", 0.000295015),
("Carney", 0.000294969),
("Cowan", 0.000294604),
("Forbes", 0.000294476),
("Ferrell", 0.000293983),
("Davies", 0.0002939),
("Barajas", 0.000293736),
("Shea", 0.000293023),
("Osborn", 0.000292795),
("Bright", 0.000292777),
("Cuevas", 0.00029253),
("Bolton", 0.000292347),
("Murillo", 0.000292064),
("Lutz", 0.000291845),
("Duarte", 0.000291442),
("Kidd", 0.000291351),
("Key", 0.000291315),
("Cooke", 0.000291114),
)
)
prefixes_female = OrderedDict(
(
("Mrs.", 0.5),
("Ms.", 0.1),
("Miss", 0.1),
("Dr.", 0.3),
)
)
prefixes_male = OrderedDict(
(
("Mr.", 0.7),
("Dr.", 0.3),
)
)
# https://en.wikipedia.org/wiki/Gender-neutral_title
prefixes_nonbinary = OrderedDict(
(
("Mx.", 0.5),
("Ind.", 0.1),
("Misc.", 0.1),
("Dr.", 0.3),
)
)
suffixes_female = OrderedDict(
(
("MD", 0.5),
("DDS", 0.3),
("PhD", 0.1),
("DVM", 0.2),
)
)
# Removed Sr and I as they'd almost never be part of legal names.
suffixes_male = OrderedDict(
(
("Jr.", 0.2),
("II", 0.05),
("III", 0.03),
("IV", 0.015),
("V", 0.005),
("MD", 0.3),
("DDS", 0.2),
("PhD", 0.1),
("DVM", 0.1),
)
)
suffixes_nonbinary = suffixes_male.copy()
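# Illustrative sketch only (not part of faker's implementation): weighted
# OrderedDicts like the ones above can be sampled with random.choices, treating
# the values as relative weights (they do not have to sum exactly to 1.0).
# The helper name weighted_choice is hypothetical and introduced here only for
# illustration.
import random
from collections import OrderedDict

def weighted_choice(table: OrderedDict) -> str:
    """Pick one key from a {name: weight} OrderedDict, proportional to weight."""
    names = list(table.keys())
    weights = list(table.values())
    return random.choices(names, weights=weights, k=1)[0]

# Example: weighted_choice(prefixes_female) returns "Mrs." about half the time.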
|
joke2k/faker
|
faker/providers/person/en_US/__init__.py
|
Python
|
mit
| 66,194
|
[
"Amber",
"Brian",
"CRYSTAL",
"Dalton"
] |
6f8e0ea280dd8875f06c804c1855c7da01df8db04d51f16be1877aa11c3bfe58
|
## @example surface_extraction_gui_with_pyside2.py
# This example demonstrates how to use FAST together with Qt Python GUI with PySide2.
# The GUI application performs Gaussian smoothing and marching cubes surface extraction on a CT thorax volume.
#
# @m_class{m-block m-warning} @par PySide2 Qt Version
# @parblock
# For this example you <b>must</b> use the same PySide2/Qt version as FAST uses (5.14.0).
# Do this with: <b>pip install pyside2==5.14.0</b>
# @endparblock
#
# @image html images/examples/python/pyside_surface_extraction.jpg
from PySide2.QtWidgets import *
from PySide2.QtCore import Slot
from PySide2.QtCore import Qt
import PySide2.QtSvg # Must import this before fast due to conflicting symbols
from shiboken2 import wrapInstance
from random import random
import fast
fast.downloadTestDataIfNotExists()
# Create FAST Pipeline and window
importer = fast.ImageFileImporter\
.create(fast.Config.getTestDataPath() + 'CT/CT-Abdomen.mhd')
smoothing = fast.GaussianSmoothing\
.create(stdDev=1.0)\
.connect(importer)
surfaceExtraction = fast.SurfaceExtraction\
.create(threshold=300)\
.connect(smoothing)
renderer = fast.TriangleRenderer.create()\
.connect(surfaceExtraction)
window = fast.SimpleWindow3D.create(width=1024, height=512)\
.connect(renderer)
# Get the underlying QtWidget of the FAST window and convert it to pyside2
mainWidget = wrapInstance(int(window.getWidget()), QWidget)
# Create GUI in Qt
layout = mainWidget.layout()
menuWidget = QWidget()
layout.addWidget(menuWidget)
menuLayout = QVBoxLayout()
menuWidget.setLayout(menuLayout)
menuLayout.setAlignment(Qt.AlignTop)
title = QLabel('<h3>Python GUI Example</h3>')
menuWidget.setFixedWidth(400)
menuLayout.addWidget(title)
# Threshold GUI
menuLayout.addWidget(QLabel('Threshold:'))
threshold_slider = QSlider(Qt.Horizontal)
threshold_slider.setRange(100, 500)
threshold_slider.setValue(300)
threshold_slider.setSingleStep(10)
# Connect slider to FAST
threshold_slider.valueChanged.connect(lambda x: surfaceExtraction.setThreshold(x))
menuLayout.addWidget(threshold_slider)
# Smoothing GUI
menuLayout.addWidget(QLabel('Smoothing:'))
smoothing_slider = QSlider(Qt.Horizontal)
smoothing_slider.setValue(1)
smoothing_slider.setRange(1, 3)
smoothing_slider.setSingleStep(1)
# Connect slider to FAST
smoothing_slider.valueChanged.connect(lambda x: smoothing.setStandardDeviation(x))
menuLayout.addWidget(smoothing_slider)
# Run everything!
window.run()
|
smistad/FAST
|
source/FAST/Examples/Python/surface_extraction_gui_with_pyside2.py
|
Python
|
bsd-2-clause
| 2,553
|
[
"Gaussian"
] |
781c459231a304177c3cba4b42b750dd4515a72c90e95d561f0b396a0619b40d
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Brian C. Lane <bcl@redhat.com>
from pyanaconda.iutil import DataHolder
import unittest
class DataHolderTests(unittest.TestCase):
def dataholder_test(self):
"""Test the DataHolder class"""
source = {"name": "Minion", "color": "Yellow", "size": 3}
data = DataHolder(**source)
# test that init keywords show up as attrs
self.assertTrue(all([getattr(data, s) == source[s] for s in source]))
# test that init keywords show as keys
self.assertTrue(all([data[s] == source[s] for s in source]))
# test that adding an attr shows as a key
data.master = "Gru"
self.assertEquals(data["master"], "Gru")
# test that adding a key shows as an attr
data["sibling"] = "More Minions"
self.assertEquals(data.sibling, "More Minions")
# test that a copy results in the same key/values
data_copy = data.copy()
self.assertEquals(data, data_copy)
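# Illustrative sketch only (not pyanaconda's DataHolder): the behaviour the test
# above exercises can be provided by a small dict subclass that maps attribute
# access onto item access, so keyword arguments appear both as keys and as
# attributes. The class name AttrDict is hypothetical.
class AttrDict(dict):
    def __getattr__(self, name):
        # Called only when normal attribute lookup fails; fall back to the dict.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Attribute assignment is stored as a dict key.
        self[name] = value

    def copy(self):
        # Preserve the subclass type when copying.
        return AttrDict(self)

d = AttrDict(name="Minion", color="Yellow", size=3)
assert d.color == d["color"] == "Yellow"   # init keywords show as attrs and keys
d.master = "Gru"                           # added attr shows as a key
assert d["master"] == "Gru"
assert d.copy() == d                       # copy keeps the same key/values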
|
maxamillion/anaconda
|
tests/pyanaconda_tests/dataholder_test.py
|
Python
|
gpl-2.0
| 1,964
|
[
"Brian"
] |
650adb753fbbedf8ebc7b678a25bd2c1cd092957899489e6a30fd7da34f83fc0
|